RDMA/siw: Fix broken RDMA Read Fence/Resume logic.
author Bernard Metzler <bmt@zurich.ibm.com>
Sun, 30 Jan 2022 17:08:15 +0000 (18:08 +0100)
committer Jason Gunthorpe <jgg@nvidia.com>
Tue, 1 Feb 2022 13:54:28 +0000 (09:54 -0400)
The code unconditionally resumed fenced SQ processing after the next
RDMA Read completion, even if other RDMA Read responses were still
outstanding or the ORQ was full. Also add comments for better
readability of the fence processing, and remove the orq_get_tail()
helper, which is no longer needed.
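
As an illustration (not part of the commit), below is a minimal
user-space C sketch of the corrected resume rule. The names
orq_model, orq_has_free_slot, orq_is_empty and may_resume_tx are
hypothetical, as is the ORQ depth of 4; only the two resume
conditions mirror the patched siw logic.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ORQ_SIZE 4	/* assumed ORQ depth for the example */

struct orq_model {
	uint32_t get;		/* consumer index (qp->orq_get) */
	uint32_t put;		/* producer index (qp->orq_put) */
	bool in_use[ORQ_SIZE];	/* models READ_ONCE(sqe->flags) != 0 */
};

/* mirrors orq_get_free(): the slot at the put index must be unused */
static bool orq_has_free_slot(const struct orq_model *orq)
{
	return !orq->in_use[orq->put % ORQ_SIZE];
}

/* mirrors siw_orq_empty(): no RDMA Read responses outstanding */
static bool orq_is_empty(const struct orq_model *orq)
{
	return orq->get == orq->put;
}

/*
 * May fenced SQ processing resume after one Read response completed?
 * next_is_read: the stalled work request is another RDMA Read that
 * was stopped by a full ORQ; otherwise it is a fenced request that
 * must wait for all outstanding Reads to complete.
 */
static bool may_resume_tx(const struct orq_model *orq, bool next_is_read)
{
	if (next_is_read)
		return orq_has_free_slot(orq);	/* one free ORQE suffices */
	return orq_is_empty(orq);		/* ORQ must drain completely */
}

int main(void)
{
	/* Reads in slots 1 and 2 still outstanding, slot 3 free */
	struct orq_model orq = {
		.get = 1, .put = 3,
		.in_use = { false, true, true, false },
	};

	printf("resume stalled Read: %d\n", may_resume_tx(&orq, true));  /* 1 */
	printf("resume fenced WR:    %d\n", may_resume_tx(&orq, false)); /* 0 */
	return 0;
}

In these terms, the bug was that a single Read completion was treated
as sufficient to clear the fence, i.e. processing resumed even when
may_resume_tx() would return false.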

Fixes: 8b6a361b8c48 ("rdma/siw: receive path")
Fixes: a531975279f3 ("rdma/siw: main include file")
Link: https://lore.kernel.org/r/20220130170815.1940-1-bmt@zurich.ibm.com
Reported-by: Jared Holzman <jared.holzman@excelero.com>
Signed-off-by: Bernard Metzler <bmt@zurich.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/sw/siw/siw.h
drivers/infiniband/sw/siw/siw_qp_rx.c

diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 368959ae9a8cc382bb0bf89f66bce731ddbf3f24..df03d84c6868ace3d5fe68c47e62cdd33e8b44d2 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -644,14 +644,9 @@ static inline struct siw_sqe *orq_get_current(struct siw_qp *qp)
        return &qp->orq[qp->orq_get % qp->attrs.orq_size];
 }
 
-static inline struct siw_sqe *orq_get_tail(struct siw_qp *qp)
-{
-       return &qp->orq[qp->orq_put % qp->attrs.orq_size];
-}
-
 static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
 {
-       struct siw_sqe *orq_e = orq_get_tail(qp);
+       struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size];
 
        if (READ_ONCE(orq_e->flags) == 0)
                return orq_e;
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 60116f20653c776c3de38cbb3c954d9bdffbb112..875ea6f1b04a29034b6b41f8bfedaefc01af28c6 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -1153,11 +1153,12 @@ static int siw_check_tx_fence(struct siw_qp *qp)
 
        spin_lock_irqsave(&qp->orq_lock, flags);
 
-       rreq = orq_get_current(qp);
-
        /* free current orq entry */
+       rreq = orq_get_current(qp);
        WRITE_ONCE(rreq->flags, 0);
 
+       qp->orq_get++;
+
        if (qp->tx_ctx.orq_fence) {
                if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) {
                        pr_warn("siw: [QP %u]: fence resume: bad status %d\n",
@@ -1165,10 +1166,12 @@ static int siw_check_tx_fence(struct siw_qp *qp)
                        rv = -EPROTO;
                        goto out;
                }
-               /* resume SQ processing */
+               /* resume SQ processing, if possible */
                if (tx_waiting->sqe.opcode == SIW_OP_READ ||
                    tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
-                       rreq = orq_get_tail(qp);
+
+                       /* SQ processing was stopped because of a full ORQ */
+                       rreq = orq_get_free(qp);
                        if (unlikely(!rreq)) {
                                pr_warn("siw: [QP %u]: no ORQE\n", qp_id(qp));
                                rv = -EPROTO;
@@ -1181,15 +1184,14 @@ static int siw_check_tx_fence(struct siw_qp *qp)
                        resume_tx = 1;
 
                } else if (siw_orq_empty(qp)) {
+                       /*
+                        * SQ processing was stopped by fenced work request.
+                        * Resume since all previous Reads are now completed.
+                        */
                        qp->tx_ctx.orq_fence = 0;
                        resume_tx = 1;
-               } else {
-                       pr_warn("siw: [QP %u]: fence resume: orq idx: %d:%d\n",
-                               qp_id(qp), qp->orq_get, qp->orq_put);
-                       rv = -EPROTO;
                }
        }
-       qp->orq_get++;
 out:
        spin_unlock_irqrestore(&qp->orq_lock, flags);