drm/i915/guc: Ensure G2H response has space in buffer
Author:     Matthew Brost <matthew.brost@intel.com>
AuthorDate: Wed, 21 Jul 2021 21:50:57 +0000 (14:50 -0700)
Commit:     John Harrison <John.C.Harrison@Intel.com>
CommitDate: Thu, 22 Jul 2021 17:07:21 +0000 (10:07 -0700)
Ensure the G2H response has space in the buffer before sending an H2G
CTB, as the GuC can't handle any backpressure on the G2H interface.
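
In short: every H2G message that expects a G2H reply must first claim
that many dwords of space in the G2H buffer, and the credits are only
returned once the reply has been consumed, so the GuC can always
complete its writes. A minimal stand-alone sketch of that accounting
(all names here are hypothetical illustrations, not the kernel code
below, which performs the check under the CTB send lock and uses
atomic_t; this sketch uses a CAS loop instead so it is safe without a
lock):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint g2h_space_dw;	/* free dwords in the G2H buffer */

static bool g2h_try_reserve(unsigned int len_dw)
{
	unsigned int old = atomic_load(&g2h_space_dw);

	/* A send that expects no G2H reply (len_dw == 0) always succeeds. */
	while (len_dw && old >= len_dw) {
		if (atomic_compare_exchange_weak(&g2h_space_dw, &old,
						 old - len_dw))
			return true;
	}
	return !len_dw;
}

static void g2h_release(unsigned int len_dw)
{
	/* Called once the corresponding G2H message has been consumed. */
	atomic_fetch_add(&g2h_space_dw, len_dw);
}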

v2:
 (Matthew)
  - s/INTEL_GUC_SEND/INTEL_GUC_CT_SEND
v3:
 (Matthew)
  - Add G2H credit accounting to blocking path, add g2h_release_space
    helper
 (John H)
  - CTB_G2H_BUFFER_SIZE / 4 == G2H_ROOM_BUFFER_SIZE

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210721215101.139794-15-matthew.brost@intel.com
drivers/gpu/drm/i915/gt/uc/intel_guc.h
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c

index 4d470ebeda95e9174234be7c5420bc9ebbd7249e..451797c62b41be925486570c550faa4c95014150 100644 (file)
@@ -96,10 +96,11 @@ inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
 }
 
 static
-inline int intel_guc_send_nb(struct intel_guc *guc, const u32 *action, u32 len)
+inline int intel_guc_send_nb(struct intel_guc *guc, const u32 *action, u32 len,
+                            u32 g2h_len_dw)
 {
        return intel_guc_ct_send(&guc->ct, action, len, NULL, 0,
-                                INTEL_GUC_CT_SEND_NB);
+                                MAKE_SEND_FLAGS(g2h_len_dw));
 }
 
 static inline int
@@ -113,6 +114,7 @@ intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
 static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
                                           const u32 *action,
                                           u32 len,
+                                          u32 g2h_len_dw,
                                           bool loop)
 {
        int err;
@@ -130,7 +132,7 @@ static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
        might_sleep_if(loop && not_atomic);
 
 retry:
-       err = intel_guc_send_nb(guc, action, len);
+       err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
        if (unlikely(err == -EBUSY && loop)) {
                if (likely(not_atomic)) {
                        if (msleep_interruptible(sleep_period_ms))
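
The net effect on callers, modeled in plain C (the names and buffer
sizes here are illustrative, not the kernel API): a non-blocking send
now fails with -EBUSY unless the H2G buffer has room for the request
and the G2H buffer has room for the expected reply, and
intel_guc_send_busy_loop() simply keeps retrying on that error.

/* Stand-alone model of the new two-sided flow-control check. */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

static uint32_t h2g_space_dw = 1024;	/* illustrative sizes only */
static uint32_t g2h_space_dw = 1024;

static int send_nb_model(uint32_t h2g_len_dw, uint32_t g2h_len_dw)
{
	bool h2g_ok = h2g_space_dw >= h2g_len_dw;
	/* g2h_len_dw == 0 means this action produces no G2H reply. */
	bool g2h_ok = !g2h_len_dw || g2h_space_dw >= g2h_len_dw;

	if (!h2g_ok || !g2h_ok)
		return -EBUSY;

	h2g_space_dw -= h2g_len_dw;	/* consumed by writing the request */
	g2h_space_dw -= g2h_len_dw;	/* reserved for the expected reply  */
	return 0;
}
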
index 019b25ff18888aaf965a5faa34795b45561ef654..75f69c28056e9261bec1c4fa056cff9e08c6d05f 100644 (file)
@@ -73,6 +73,7 @@ static inline struct drm_device *ct_to_drm(struct intel_guc_ct *ct)
 #define CTB_DESC_SIZE          ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
 #define CTB_H2G_BUFFER_SIZE    (SZ_4K)
 #define CTB_G2H_BUFFER_SIZE    (4 * CTB_H2G_BUFFER_SIZE)
+#define G2H_ROOM_BUFFER_SIZE   (CTB_G2H_BUFFER_SIZE / 4)
 
 struct ct_request {
        struct list_head link;
@@ -129,23 +130,27 @@ static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc)
 
 static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb)
 {
+       u32 space;
+
        ctb->broken = false;
        ctb->tail = 0;
        ctb->head = 0;
-       ctb->space = CIRC_SPACE(ctb->tail, ctb->head, ctb->size);
+       space = CIRC_SPACE(ctb->tail, ctb->head, ctb->size) - ctb->resv_space;
+       atomic_set(&ctb->space, space);
 
        guc_ct_buffer_desc_init(ctb->desc);
 }
 
 static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
                               struct guc_ct_buffer_desc *desc,
-                              u32 *cmds, u32 size_in_bytes)
+                              u32 *cmds, u32 size_in_bytes, u32 resv_space)
 {
        GEM_BUG_ON(size_in_bytes % 4);
 
        ctb->desc = desc;
        ctb->cmds = cmds;
        ctb->size = size_in_bytes / 4;
+       ctb->resv_space = resv_space / 4;
 
        guc_ct_buffer_reset(ctb);
 }
@@ -226,6 +231,7 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
        struct guc_ct_buffer_desc *desc;
        u32 blob_size;
        u32 cmds_size;
+       u32 resv_space;
        void *blob;
        u32 *cmds;
        int err;
@@ -250,19 +256,23 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
        desc = blob;
        cmds = blob + 2 * CTB_DESC_SIZE;
        cmds_size = CTB_H2G_BUFFER_SIZE;
-       CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "send",
-                ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size);
+       resv_space = 0;
+       CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "send",
+                ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
+                resv_space);
 
-       guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size);
+       guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size, resv_space);
 
        /* store pointers to desc and cmds for recv ctb */
        desc = blob + CTB_DESC_SIZE;
        cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;
        cmds_size = CTB_G2H_BUFFER_SIZE;
-       CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "recv",
-                ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size);
+       resv_space = G2H_ROOM_BUFFER_SIZE;
+       CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "recv",
+                ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
+                resv_space);
 
-       guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size);
+       guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size, resv_space);
 
        return 0;
 }
@@ -461,8 +471,8 @@ static int ct_write(struct intel_guc_ct *ct,
 
        /* update local copies */
        ctb->tail = tail;
-       GEM_BUG_ON(ctb->space < len + GUC_CTB_HDR_LEN);
-       ctb->space -= len + GUC_CTB_HDR_LEN;
+       GEM_BUG_ON(atomic_read(&ctb->space) < len + GUC_CTB_HDR_LEN);
+       atomic_sub(len + GUC_CTB_HDR_LEN, &ctb->space);
 
        /* now update descriptor */
        WRITE_ONCE(desc->tail, tail);
@@ -537,6 +547,32 @@ static inline bool ct_deadlocked(struct intel_guc_ct *ct)
        return ret;
 }
 
+static inline bool g2h_has_room(struct intel_guc_ct *ct, u32 g2h_len_dw)
+{
+       struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
+
+       /*
+        * We leave a certain amount of space in the G2H CTB buffer for
+        * unexpected G2H CTBs (e.g. logging, engine hang, etc.)
+        */
+       return !g2h_len_dw || atomic_read(&ctb->space) >= g2h_len_dw;
+}
+
+static inline void g2h_reserve_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
+{
+       lockdep_assert_held(&ct->ctbs.send.lock);
+
+       GEM_BUG_ON(!g2h_has_room(ct, g2h_len_dw));
+
+       if (g2h_len_dw)
+               atomic_sub(g2h_len_dw, &ct->ctbs.recv.space);
+}
+
+static inline void g2h_release_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
+{
+       atomic_add(g2h_len_dw, &ct->ctbs.recv.space);
+}
+
 static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
 {
        struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
@@ -544,7 +580,7 @@ static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
        u32 head;
        u32 space;
 
-       if (ctb->space >= len_dw)
+       if (atomic_read(&ctb->space) >= len_dw)
                return true;
 
        head = READ_ONCE(desc->head);
@@ -557,16 +593,16 @@ static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
        }
 
        space = CIRC_SPACE(ctb->tail, head, ctb->size);
-       ctb->space = space;
+       atomic_set(&ctb->space, space);
 
        return space >= len_dw;
 }
 
-static int has_room_nb(struct intel_guc_ct *ct, u32 len_dw)
+static int has_room_nb(struct intel_guc_ct *ct, u32 h2g_dw, u32 g2h_dw)
 {
        lockdep_assert_held(&ct->ctbs.send.lock);
 
-       if (unlikely(!h2g_has_room(ct, len_dw))) {
+       if (unlikely(!h2g_has_room(ct, h2g_dw) || !g2h_has_room(ct, g2h_dw))) {
                if (ct->stall_time == KTIME_MAX)
                        ct->stall_time = ktime_get();
 
@@ -580,6 +616,12 @@ static int has_room_nb(struct intel_guc_ct *ct, u32 len_dw)
        return 0;
 }
 
+#define G2H_LEN_DW(f) ({ \
+       typeof(f) f_ = (f); \
+       FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) ? \
+       FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) + \
+       GUC_CTB_HXG_MSG_MIN_LEN : 0; \
+})
 static int ct_send_nb(struct intel_guc_ct *ct,
                      const u32 *action,
                      u32 len,
@@ -587,12 +629,13 @@ static int ct_send_nb(struct intel_guc_ct *ct,
 {
        struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
        unsigned long spin_flags;
+       u32 g2h_len_dw = G2H_LEN_DW(flags);
        u32 fence;
        int ret;
 
        spin_lock_irqsave(&ctb->lock, spin_flags);
 
-       ret = has_room_nb(ct, len + GUC_CTB_HDR_LEN);
+       ret = has_room_nb(ct, len + GUC_CTB_HDR_LEN, g2h_len_dw);
        if (unlikely(ret))
                goto out;
 
@@ -601,6 +644,7 @@ static int ct_send_nb(struct intel_guc_ct *ct,
        if (unlikely(ret))
                goto out;
 
+       g2h_reserve_space(ct, g2h_len_dw);
        intel_guc_notify(ct_to_guc(ct));
 
 out:
@@ -632,11 +676,13 @@ static int ct_send(struct intel_guc_ct *ct,
        /*
         * We use a lazy spin wait loop here as we believe that if the CT
         * buffers are sized correctly the flow control condition should be
-        * rare.
+        * rare. We reserve the maximum size in G2H credits as we don't
+        * know how big the response is going to be.
         */
 retry:
        spin_lock_irqsave(&ctb->lock, flags);
-       if (unlikely(!h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
+       if (unlikely(!h2g_has_room(ct, len + GUC_CTB_HDR_LEN) ||
+                    !g2h_has_room(ct, GUC_CTB_HXG_MSG_MAX_LEN))) {
                if (ct->stall_time == KTIME_MAX)
                        ct->stall_time = ktime_get();
                spin_unlock_irqrestore(&ctb->lock, flags);
@@ -664,6 +710,7 @@ retry:
        spin_unlock(&ct->requests.lock);
 
        err = ct_write(ct, action, len, fence, 0);
+       g2h_reserve_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
 
        spin_unlock_irqrestore(&ctb->lock, flags);
 
@@ -673,6 +720,7 @@ retry:
        intel_guc_notify(ct_to_guc(ct));
 
        err = wait_for_ct_request_update(&request, status);
+       g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
        if (unlikely(err))
                goto unlink;
 
@@ -992,10 +1040,22 @@ static void ct_incoming_request_worker_func(struct work_struct *w)
 static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
 {
        const u32 *hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
+       u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
        unsigned long flags;
 
        GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT);
 
+       /*
+        * Adjusting the space must be done in IRQ context, or a deadlock can
+        * occur: the CTB processing in the workqueue below can itself send
+        * CTBs, which would create a circular dependency if the space were
+        * returned there.
+        */
+       switch (action) {
+       case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
+       case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
+               g2h_release_space(ct, request->size);
+       }
+
        spin_lock_irqsave(&ct->requests.lock, flags);
        list_add_tail(&request->link, &ct->requests.incoming);
        spin_unlock_irqrestore(&ct->requests.lock, flags);
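
Two details of the intel_guc_ct.c changes above are worth spelling out.
First, the blocking ct_send() path cannot know the reply size up front,
so it reserves the worst case (GUC_CTB_HXG_MSG_MAX_LEN) and returns
those credits as soon as the reply arrives. Second, for the two context
events the credits must be released in the IRQ path (ct_handle_event())
rather than in the request worker, because the worker itself sends CTBs
and would otherwise deadlock waiting for space it is supposed to free.
A stand-alone sketch of the first point (names hypothetical; MAX_REPLY_DW
stands in for GUC_CTB_HXG_MSG_MAX_LEN):

#include <stdatomic.h>

#define MAX_REPLY_DW 64U	/* stand-in for GUC_CTB_HXG_MSG_MAX_LEN */

static atomic_uint g2h_space_dw = 1024;

static void blocking_send_model(void (*write_request)(void),
				void (*wait_for_reply)(void))
{
	write_request();
	/* Worst-case reservation: the reply size is unknown up front. */
	atomic_fetch_sub(&g2h_space_dw, MAX_REPLY_DW);

	wait_for_reply();
	/* Reply consumed; hand the worst-case credits back. */
	atomic_fetch_add(&g2h_space_dw, MAX_REPLY_DW);
}
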
index edd1bba0445d327c52c155274a3dd2eb7542bc2a..dda2d6a75392cd67786ffc27f236baae53a0a211 100644 (file)
@@ -33,6 +33,7 @@ struct intel_guc;
  * @desc: pointer to the buffer descriptor
  * @cmds: pointer to the commands buffer
  * @size: size of the commands buffer in dwords
+ * @resv_space: reserved space in buffer in dwords
  * @head: local shadow copy of head in dwords
  * @tail: local shadow copy of tail in dwords
  * @space: local shadow copy of space in dwords
@@ -43,9 +44,10 @@ struct intel_guc_ct_buffer {
        struct guc_ct_buffer_desc *desc;
        u32 *cmds;
        u32 size;
+       u32 resv_space;
        u32 tail;
        u32 head;
-       u32 space;
+       atomic_t space;
        bool broken;
 };
 
@@ -97,6 +99,13 @@ static inline bool intel_guc_ct_enabled(struct intel_guc_ct *ct)
 }
 
 #define INTEL_GUC_CT_SEND_NB           BIT(31)
+#define INTEL_GUC_CT_SEND_G2H_DW_SHIFT 0
+#define INTEL_GUC_CT_SEND_G2H_DW_MASK  (0xff << INTEL_GUC_CT_SEND_G2H_DW_SHIFT)
+#define MAKE_SEND_FLAGS(len) ({ \
+       typeof(len) len_ = (len); \
+       GEM_BUG_ON(!FIELD_FIT(INTEL_GUC_CT_SEND_G2H_DW_MASK, len_)); \
+       (FIELD_PREP(INTEL_GUC_CT_SEND_G2H_DW_MASK, len_) | INTEL_GUC_CT_SEND_NB); \
+})
 int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
                      u32 *response_buf, u32 response_buf_size, u32 flags);
 void intel_guc_ct_event_handler(struct intel_guc_ct *ct);
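
A round-trip demo of the flag packing that MAKE_SEND_FLAGS() above and
G2H_LEN_DW() in intel_guc_ct.c implement, rewritten with plain shifts
and masks so it compiles outside the kernel (the bit layout mirrors the
defines above; everything else is illustrative): bit 31 marks a
non-blocking send and bits 7:0 carry the expected G2H length in dwords.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SEND_NB_FLAG	(1u << 31)
#define G2H_DW_SHIFT	0
#define G2H_DW_MASK	(0xffu << G2H_DW_SHIFT)

static uint32_t make_send_flags(uint32_t g2h_len_dw)
{
	assert(g2h_len_dw <= (G2H_DW_MASK >> G2H_DW_SHIFT));
	return ((g2h_len_dw << G2H_DW_SHIFT) & G2H_DW_MASK) | SEND_NB_FLAG;
}

static uint32_t g2h_len_dw_from_flags(uint32_t flags)
{
	return (flags & G2H_DW_MASK) >> G2H_DW_SHIFT;
}

int main(void)
{
	uint32_t flags = make_send_flags(3);

	assert(flags & SEND_NB_FLAG);
	assert(g2h_len_dw_from_flags(flags) == 3);
	printf("flags=%#x g2h_len_dw=%u\n", flags,
	       g2h_len_dw_from_flags(flags));
	return 0;
}
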
index 3489b390ae774f873fccea8e81f5fb0274b2756d..82534259b7adcae2f8168690449f69594388aa66 100644 (file)
 #include "abi/guc_communication_ctb_abi.h"
 #include "abi/guc_messages_abi.h"
 
+/* Payload length only, i.e. excluding the G2H header length */
+#define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET      2
+#define G2H_LEN_DW_DEREGISTER_CONTEXT          1
+
 #define GUC_CONTEXT_DISABLE            0
 #define GUC_CONTEXT_ENABLE             1
 
index fc0b36ab1e683ea633563c7cedc7c6a411a95ae8..cba38f05e44d3240fcb87ba25030ecd27add6853 100644 (file)
@@ -258,6 +258,7 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
        struct intel_context *ce = rq->context;
        u32 action[3];
        int len = 0;
+       u32 g2h_len_dw = 0;
        bool enabled = context_enabled(ce);
 
        GEM_BUG_ON(!atomic_read(&ce->guc_id_ref));
@@ -269,13 +270,13 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
                action[len++] = GUC_CONTEXT_ENABLE;
                set_context_pending_enable(ce);
                intel_context_get(ce);
+               g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
        } else {
                action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT;
                action[len++] = ce->guc_id;
        }
 
-       err = intel_guc_send_nb(guc, action, len);
-
+       err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
        if (!enabled && !err) {
                set_context_enabled(ce);
        } else if (!enabled) {
@@ -734,7 +735,7 @@ static int __guc_action_register_context(struct intel_guc *guc,
                offset,
        };
 
-       return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), true);
+       return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
 }
 
 static int register_context(struct intel_context *ce)
@@ -754,7 +755,8 @@ static int __guc_action_deregister_context(struct intel_guc *guc,
                guc_id,
        };
 
-       return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), true);
+       return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action),
+                                       G2H_LEN_DW_DEREGISTER_CONTEXT, true);
 }
 
 static int deregister_context(struct intel_context *ce, u32 guc_id)
@@ -899,7 +901,8 @@ static void __guc_context_sched_disable(struct intel_guc *guc,
 
        intel_context_get(ce);
 
-       intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), true);
+       intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action),
+                                G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
 }
 
 static u16 prep_context_pending_disable(struct intel_context *ce)