xprtrdma: Remove atomic send completion counting
author     Chuck Lever <chuck.lever@oracle.com>
           Fri, 20 Oct 2017 14:48:45 +0000 (10:48 -0400)
committer  Anna Schumaker <Anna.Schumaker@Netapp.com>
           Fri, 17 Nov 2017 18:47:58 +0000 (13:47 -0500)
The sendctx circular queue now guarantees that xprtrdma cannot
overflow the Send Queue, so remove the remaining bits of the
original Send WQE counting mechanism.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
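
[Editor's note] For context: after the sendctx series, Send Queue accounting
rides on the circular sendctx queue, and completion signaling is batched
through the rep_send_count and rep_send_batch fields visible in the verbs.c
hunk below. A minimal sketch of that batched-signaling decision (the helper
name is illustrative, not the upstream one; the fields are from this series):

	#include <rdma/ib_verbs.h>	/* struct ib_send_wr, IB_SEND_SIGNALED */
	#include "xprt_rdma.h"		/* struct rpcrdma_ep */

	/* Sketch: request a signaled completion once per rep_send_batch
	 * Sends so the provider can retire consumed SQEs; every other
	 * Send is posted unsignaled. Helper name is illustrative.
	 */
	static void rpcrdma_send_set_flags(struct rpcrdma_ep *ep,
					   struct ib_send_wr *send_wr)
	{
		if (!ep->rep_send_count) {
			send_wr->send_flags |= IB_SEND_SIGNALED;
			ep->rep_send_count = ep->rep_send_batch;
		} else {
			send_wr->send_flags &= ~IB_SEND_SIGNALED;
			--ep->rep_send_count;
		}
	}

One signaled completion per batch is enough because the sendctx queue, not a
CQ counter, now bounds how many SQEs can be consumed.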
net/sunrpc/xprtrdma/frwr_ops.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h

index 3053fb0f5cb3811e4a3e9585256019372f275888..404166ac958feda6a566c2380c29628841058bc8 100644
@@ -419,7 +419,6 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
                         IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
                         IB_ACCESS_REMOTE_READ;
 
-       rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
        rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
        if (rc)
                goto out_senderr;
@@ -507,12 +506,6 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
        f->fr_cqe.done = frwr_wc_localinv_wake;
        reinit_completion(&f->fr_linv_done);
 
-       /* Initialize CQ count, since there is always a signaled
-        * WR being posted here.  The new cqcount depends on how
-        * many SQEs are about to be consumed.
-        */
-       rpcrdma_init_cqcount(&r_xprt->rx_ep, count);
-
        /* Transport disconnect drains the receive CQ before it
         * replaces the QP. The RPC reply handler won't call us
         * unless ri_id->qp is a valid pointer.
@@ -545,7 +538,6 @@ reset_mrs:
        /* Find and reset the MRs in the LOCAL_INV WRs that did not
         * get posted.
         */
-       rpcrdma_init_cqcount(&r_xprt->rx_ep, -count);
        while (bad_wr) {
                f = container_of(bad_wr, struct rpcrdma_frmr,
                                 fr_invwr);
index 9a824fe8ffc27d8a859efb01535750342a019f50..22128a81da6323774bb415cdb915c7876b14121e 100644
@@ -553,10 +553,6 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
        ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
                                   cdata->max_requests >> 2);
        ep->rep_send_count = ep->rep_send_batch;
-       ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
-       if (ep->rep_cqinit <= 2)
-               ep->rep_cqinit = 0;     /* always signal? */
-       rpcrdma_init_cqcount(ep, 0);
        init_waitqueue_head(&ep->rep_connect_wait);
        INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
 
index bccd5d8b93840870946fd8376d1f59110120c1bc..6e64c8259d34751d32422a254fb5e9957acb5828 100644
@@ -95,8 +95,6 @@ enum {
 struct rpcrdma_ep {
        unsigned int            rep_send_count;
        unsigned int            rep_send_batch;
-       atomic_t                rep_cqcount;
-       int                     rep_cqinit;
        int                     rep_connected;
        struct ib_qp_init_attr  rep_attr;
        wait_queue_head_t       rep_connect_wait;
@@ -106,25 +104,6 @@ struct rpcrdma_ep {
        struct delayed_work     rep_connect_worker;
 };
 
-static inline void
-rpcrdma_init_cqcount(struct rpcrdma_ep *ep, int count)
-{
-       atomic_set(&ep->rep_cqcount, ep->rep_cqinit - count);
-}
-
-/* To update send queue accounting, provider must take a
- * send completion every now and then.
- */
-static inline void
-rpcrdma_set_signaled(struct rpcrdma_ep *ep, struct ib_send_wr *send_wr)
-{
-       send_wr->send_flags = 0;
-       if (unlikely(atomic_sub_return(1, &ep->rep_cqcount) <= 0)) {
-               rpcrdma_init_cqcount(ep, 0);
-               send_wr->send_flags = IB_SEND_SIGNALED;
-       }
-}
-
 /* Pre-allocate extra Work Requests for handling backward receives
  * and sends. This is a fixed value because the Work Queues are
  * allocated when the forward channel is set up.
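
[Editor's note] The two helpers removed above were the last consumers of
rep_cqcount and rep_cqinit. They can go because a Send WR is now built
inside a sendctx taken from a circular queue with one element per Send
Queue entry, so holding a sendctx guarantees a free SQE. A rough sketch of
that invariant (structure and field names are illustrative approximations
of the sendctx code added earlier in this series):

	/* Sketch: refuse to hand out a sendctx when every SQE is
	 * consumed by an in-flight Send. The caller backs off until a
	 * signaled Send completion advances rb_sc_tail, so the Send
	 * Queue cannot overflow regardless of signaling policy.
	 */
	static struct rpcrdma_sendctx *
	rpcrdma_sendctx_get(struct rpcrdma_buffer *buf)
	{
		unsigned long next = (buf->rb_sc_head + 1) % buf->rb_sc_count;

		if (next == READ_ONCE(buf->rb_sc_tail))
			return NULL;	/* all Sends in flight */

		buf->rb_sc_head = next;
		return buf->rb_sc_ctxs[next];
	}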