TRACE_EVENT(xprtrdma_post_send,
TP_PROTO(const struct rpcrdma_req *req, int status),
TP_ARGS(req, status),

TP_STRUCT__entry(
__field(const void *, req)
+ __field(const void *, sc)
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(int, num_sge)
__field(int, signaled)
__field(int, status)
),

TP_fast_assign(
const struct rpc_rqst *rqst = &req->rl_slot;

__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client ?
rqst->rq_task->tk_client->cl_clid : -1;
__entry->req = req;
+ __entry->sc = req->rl_sendctx;
__entry->num_sge = req->rl_wr.num_sge;
__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
__entry->status = status;
),
- TP_printk("task:%u@%u req=%p (%d SGE%s) %sstatus=%d",
+ TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %sstatus=%d",
__entry->task_id, __entry->client_id,
- __entry->req, __entry->num_sge,
+ __entry->req, __entry->sc, __entry->num_sge,
(__entry->num_sge == 1 ? "" : "s"),
(__entry->signaled ? "signaled " : ""),
__entry->status
)
);

TRACE_EVENT(xprtrdma_wc_send,
TP_PROTO(const struct rpcrdma_sendctx *sc, const struct ib_wc *wc),
TP_ARGS(sc, wc),
TP_STRUCT__entry(
__field(const void *, req)
+ __field(const void *, sc)
__field(unsigned int, unmap_count)
__field(unsigned int, status)
__field(unsigned int, vendor_err)
),

TP_fast_assign(
__entry->req = sc->sc_req;
+ __entry->sc = sc;
__entry->unmap_count = sc->sc_unmap_count;
__entry->status = wc->status;
__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
),
- TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
- __entry->req, __entry->unmap_count,
+ TP_printk("req=%p sc=%p unmapped=%u: %s (%u/0x%x)",
+ __entry->req, __entry->sc, __entry->unmap_count,
rdma_show_wc_status(__entry->status),
__entry->status, __entry->vendor_err
)
);

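For context: the sc recorded by the wc_send event above arrives via the Send
completion handler, which recovers the sendctx from the completed work
request's CQE. A simplified sketch of that path, modeled on rpcrdma_wc_send()
in verbs.c (error handling and sendctx recycling omitted):

	static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
	{
		struct ib_cqe *cqe = wc->wr_cqe;
		struct rpcrdma_sendctx *sc =
			container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

		/* Only wr_cqe and status are reliable on a flushed WC */
		trace_xprtrdma_wc_send(sc, wc);
	}
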
/*
* internal functions
*/
+static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt);
+static void rpcrdma_sendctxs_destroy(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_sendctx *sc);
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
}
rpcrdma_mrs_destroy(r_xprt);
+ rpcrdma_sendctxs_destroy(r_xprt);
ib_dealloc_pd(ia->ri_pd);
ia->ri_pd = NULL;
rpcrdma_reset_cwnd(r_xprt);
rpcrdma_post_recvs(r_xprt, true);
+ rc = rpcrdma_sendctxs_create(r_xprt);
+ if (rc)
+ goto out;
+
rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
if (rc)
goto out;
rpcrdma_xprt_drain(r_xprt);
rpcrdma_reqs_reset(r_xprt);
rpcrdma_mrs_destroy(r_xprt);
+ rpcrdma_sendctxs_destroy(r_xprt);
}
/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free. The queue is not destroyed until there is no more
 * queue activity, and rpcrdma_xprt_drain has flushed all remaining
 * Send requests.
 */
-static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
+static void rpcrdma_sendctxs_destroy(struct rpcrdma_xprt *r_xprt)
{
+ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
unsigned long i;
+ if (!buf->rb_sc_ctxs)
+ return;
for (i = 0; i <= buf->rb_sc_last; i++)
kfree(buf->rb_sc_ctxs[i]);
kfree(buf->rb_sc_ctxs);
+ buf->rb_sc_ctxs = NULL;
}
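Note that the new !rb_sc_ctxs guard makes this destructor idempotent: it is
now invoked from both teardown paths shown above (device removal and
disconnect), and whichever runs second finds nothing to free. A minimal
userspace model of the same pattern (names here are illustrative, not
xprtrdma's):

	#include <stdlib.h>

	struct conn_res {
		void **ctxs;		/* NULL once torn down */
	};

	static void res_destroy(struct conn_res *res)
	{
		if (!res->ctxs)		/* second caller: nothing to do */
			return;
		free(res->ctxs);
		res->ctxs = NULL;	/* record that teardown happened */
	}
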
static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ep *ep)

/* Maximum number of concurrent outstanding Send WRs. The queue is
 * sized so that ->send_request fails temporarily before too many
 * Sends are posted.
 */
i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
- dprintk("RPC: %s: allocating %lu send_ctxs\n", __func__, i);
buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
if (!buf->rb_sc_ctxs)
return -ENOMEM;
buf->rb_sc_last = i - 1;
for (i = 0; i <= buf->rb_sc_last; i++) {
sc = rpcrdma_sendctx_create(&r_xprt->rx_ep);
if (!sc)
return -ENOMEM;
buf->rb_sc_ctxs[i] = sc;
}
+ buf->rb_sc_head = 0;
+ buf->rb_sc_tail = 0;
return 0;
}
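Resetting rb_sc_head and rb_sc_tail here matters because the queue is now
rebuilt on every reconnect: a fresh queue must start with every sendctx
available. A minimal single-threaded model of the FIFO discipline described
earlier (names are illustrative; the kernel code additionally needs
READ_ONCE/smp_store_release ordering because producer and consumer run
concurrently):

	struct ring {
		void **slots;		/* like rb_sc_ctxs */
		unsigned long last;	/* like rb_sc_last */
		unsigned long head;	/* consumer index, like rb_sc_head */
		unsigned long tail;	/* producer index, like rb_sc_tail */
	};

	static unsigned long ring_next(const struct ring *r, unsigned long i)
	{
		return i > r->last ? 0 : i;	/* wrap around */
	}

	/* Consumer (Send posting): take a free context, or NULL if the
	 * queue is exhausted and we must wait for a Send completion.
	 */
	static void *ring_get(struct ring *r)
	{
		unsigned long next = ring_next(r, r->head + 1);

		if (next == r->tail)
			return NULL;
		r->head = next;
		return r->slots[next];
	}

	/* Producer (Send completion): everything up to and including the
	 * completed context becomes reusable.
	 */
	static void ring_put(struct ring *r, const void *completed)
	{
		unsigned long next = r->tail;

		do
			next = ring_next(r, next + 1);
		while (r->slots[next] != completed);
		r->tail = next;
	}

With head == tail == 0, ring_get() succeeds for every slot but one; the
single sacrificed slot is what distinguishes a full queue from an empty one.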
init_llist_head(&buf->rb_free_reps);
- rc = rpcrdma_sendctxs_create(r_xprt);
- if (rc)
- goto out;
-
return 0;
out:
rpcrdma_buffer_destroy(buf);
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
- rpcrdma_sendctxs_destroy(buf);
rpcrdma_reps_destroy(buf);
while (!list_empty(&buf->rb_send_bufs)) {