svcrdma: Remove max_sge check at connect time
author    Chuck Lever <chuck.lever@oracle.com>
          Fri, 25 Jan 2019 21:54:54 +0000 (16:54 -0500)
committer J. Bruce Fields <bfields@redhat.com>
          Wed, 6 Feb 2019 20:32:34 +0000 (15:32 -0500)
Two and a half years ago, the client was changed to use gathered
Send for larger inline messages, in commit eedfbf4c449 ("xprtrdma:
Use gathered Send for large inline messages"). Several fixes were
required because there are a few in-kernel device drivers whose
max_sge is 3, and these were broken by the change.

Apparently my memory is going, because some time later, I submitted
commit 78fa0a1da32c ("svcrdma: Don't overrun the SGE array in
svc_rdma_send_ctxt"), and after that, commit 8fd74e9acf8a ("svcrdma:
Reduce max_send_sges"). These too incorrectly assumed in-kernel
device drivers would have more than a few Send SGEs available.

The fix for the server side is not the same. This is because the
fundamental problem on the server is that, whether or not the client
has provisioned a chunk for the RPC reply, the server must squeeze
even the most complex RPC replies into a single RDMA Send. Failing
in the send path because of Send SGE exhaustion should never be an
option.

Therefore, instead of failing when the send path runs out of SGEs,
switch to using a bounce buffer mechanism to handle RPC replies that
are too complex for the device to send directly. That allows the
max_sge check to be removed, so drivers with a small max_sge work
again.
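
In outline, the decision added to svc_rdma_map_reply_msg() behaves as
sketched below. This is a simplified illustration of the logic in this
patch, not the literal code:

	if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
		/* The device has too few Send SGEs for this reply:
		 * copy (pull up) the head, page list, and tail into
		 * the already-mapped transport header buffer and
		 * transmit the whole reply with a single SGE.
		 */
		return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);

	/* Otherwise, DMA-map the head, each page list entry, and the
	 * tail directly, consuming one Send SGE per element.
	 */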

Reported-by: Don Dutile <ddutile@redhat.com>
Fixes: 78fa0a1da32c ("svcrdma: Don't overrun the SGE array in svc_rdma_send_ctxt")
Cc: stable@vger.kernel.org
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
net/sunrpc/xprtrdma/svc_rdma_sendto.c
net/sunrpc/xprtrdma/svc_rdma_transport.c

diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index cf51b8f9b15f5d2806366b116c1226fff1b2fd8b..1f200119268ccdff5674f49eadebbde0de86fdd1 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -537,6 +537,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
                                      DMA_TO_DEVICE);
 }
 
+/* If the xdr_buf has more elements than the device can
+ * transmit in a single RDMA Send, then the reply will
+ * have to be copied into a bounce buffer.
+ */
+static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
+                                   struct xdr_buf *xdr,
+                                   __be32 *wr_lst)
+{
+       int elements;
+
+       /* xdr->head */
+       elements = 1;
+
+       /* xdr->pages */
+       if (!wr_lst) {
+               unsigned int remaining;
+               unsigned long pageoff;
+
+               pageoff = xdr->page_base & ~PAGE_MASK;
+               remaining = xdr->page_len;
+               while (remaining) {
+                       ++elements;
+                       remaining -= min_t(u32, PAGE_SIZE - pageoff,
+                                          remaining);
+                       pageoff = 0;
+               }
+       }
+
+       /* xdr->tail */
+       if (xdr->tail[0].iov_len)
+               ++elements;
+
+       /* assume 1 SGE is needed for the transport header */
+       return elements >= rdma->sc_max_send_sges;
+}
+
+/* The device is not capable of sending the reply directly.
+ * Assemble the elements of @xdr into the transport header
+ * buffer.
+ */
+static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
+                                     struct svc_rdma_send_ctxt *ctxt,
+                                     struct xdr_buf *xdr, __be32 *wr_lst)
+{
+       unsigned char *dst, *tailbase;
+       unsigned int taillen;
+
+       dst = ctxt->sc_xprt_buf;
+       dst += ctxt->sc_sges[0].length;
+
+       memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
+       dst += xdr->head[0].iov_len;
+
+       tailbase = xdr->tail[0].iov_base;
+       taillen = xdr->tail[0].iov_len;
+       if (wr_lst) {
+               u32 xdrpad;
+
+               xdrpad = xdr_padsize(xdr->page_len);
+               if (taillen && xdrpad) {
+                       tailbase += xdrpad;
+                       taillen -= xdrpad;
+               }
+       } else {
+               unsigned int len, remaining;
+               unsigned long pageoff;
+               struct page **ppages;
+
+               ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
+               pageoff = xdr->page_base & ~PAGE_MASK;
+               remaining = xdr->page_len;
+               while (remaining) {
+                       len = min_t(u32, PAGE_SIZE - pageoff, remaining);
+
+                       memcpy(dst, page_address(*ppages++) + pageoff, len);
+                       remaining -= len;
+                       dst += len;
+                       pageoff = 0;
+               }
+       }
+
+       if (taillen)
+               memcpy(dst, tailbase, taillen);
+
+       ctxt->sc_sges[0].length += xdr->len;
+       ib_dma_sync_single_for_device(rdma->sc_pd->device,
+                                     ctxt->sc_sges[0].addr,
+                                     ctxt->sc_sges[0].length,
+                                     DMA_TO_DEVICE);
+
+       return 0;
+}
+
 /* svc_rdma_map_reply_msg - Map the buffer holding RPC message
  * @rdma: controlling transport
  * @ctxt: send_ctxt for the Send WR
@@ -559,8 +652,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
        u32 xdr_pad;
        int ret;
 
-       if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-               return -EIO;
+       if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
+               return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
+
+       ++ctxt->sc_cur_sge_no;
        ret = svc_rdma_dma_map_buf(rdma, ctxt,
                                   xdr->head[0].iov_base,
                                   xdr->head[0].iov_len);
@@ -591,8 +686,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
        while (remaining) {
                len = min_t(u32, PAGE_SIZE - page_off, remaining);
 
-               if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-                       return -EIO;
+               ++ctxt->sc_cur_sge_no;
                ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
                                            page_off, len);
                if (ret < 0)
@@ -606,8 +700,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
        len = xdr->tail[0].iov_len;
 tail:
        if (len) {
-               if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-                       return -EIO;
+               ++ctxt->sc_cur_sge_no;
                ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
                if (ret < 0)
                        return ret;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 924c17d46903c9ead94c6a2c33d926ef53469392..57f86c63a46369537c3362dae062da82b554c271 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -419,12 +419,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        /* Transport header, head iovec, tail iovec */
        newxprt->sc_max_send_sges = 3;
        /* Add one SGE per page list entry */
-       newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE;
-       if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) {
-               pr_err("svcrdma: too few Send SGEs available (%d needed)\n",
-                      newxprt->sc_max_send_sges);
-               goto errout;
-       }
+       newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
+       if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
+               newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
        newxprt->sc_max_req_size = svcrdma_max_req_size;
        newxprt->sc_max_requests = svcrdma_max_requests;
        newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
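
With the connect-time check gone, svc_rdma_accept() simply computes how
many Send SGEs it would like and clamps that to what the device reports.
A worked example with purely illustrative numbers (not the module
defaults):

	/* Illustrative values only: PAGE_SIZE = 4096,
	 * svcrdma_max_req_size = 16384, device max_send_sge = 3.
	 */
	unsigned int sges = 3;			/* transport header, head, tail */
	sges += (16384 / 4096) + 1;		/* one per page, plus one: now 8 */
	if (sges > 3)				/* dev->attrs.max_send_sge       */
		sges = 3;			/* clamped; replies needing more
						 * elements use the pull-up path */

A device that offers only three Send SGEs can therefore connect again;
replies too complex for three SGEs are bounced through
svc_rdma_pull_up_reply_msg() instead of failing the Send.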