// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and invalidation
 * of arbitrarily-sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_async and frwr_unmap_sync).
 *
 * Typically FAST_REG Work Requests are not signaled, and neither are
 * RDMA Send Work Requests (with the exception of signaling occasionally
 * to prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 */
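
/* A typical MR lifecycle, as implemented below:
 *
 *   frwr_map()         - build a scatterlist and prepare a REG_MR WR
 *   frwr_send()        - chain the REG_MR WR(s) ahead of the RPC's Send WR
 *                        and post the whole chain with one ib_post_send()
 *   frwr_reminv()      - recycle an MR that the server invalidated remotely
 *   frwr_unmap_async() - post LOCAL_INV WR(s); the final completion wakes
 *                        the waiting RPC
 *   frwr_unmap_sync()  - as above, but sleeps until invalidation completes
 */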

/* Transport recovery
 *
 * frwr_map and frwr_unmap_* cannot run at the same time the transport
 * connect worker is running. The connect worker holds the transport
 * send lock, just as ->send_request does. This prevents frwr_map and
 * the connect worker from running concurrently. When a connection is
 * closed, the Receive completion queue is drained before allowing
 * the connect worker to get control. This prevents frwr_unmap and the
 * connect worker from running concurrently.
 *
 * When the underlying transport disconnects, MRs that are in flight
 * are flushed and are likely unusable. Thus all MRs are destroyed.
 * New MRs are created on demand.
 */

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

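/* Record the completion IDs that tracepoints report for this MR: the
 * Send CQ's resource ID and the MR's own resource ID.
 */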
static void frwr_cid_init(struct rpcrdma_ep *ep,
			  struct rpcrdma_mr *mr)
{
	struct rpc_rdma_cid *cid = &mr->mr_cid;

	cid->ci_queue_id = ep->re_attr.send_cq->res.id;
	cid->ci_completion_id = mr->mr_ibmr->res.id;
}

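/* DMA unmap the MR's scatterlist, if it is currently mapped. Safe to
 * call repeatedly; mr_device is cleared after the first unmap.
 */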
static void frwr_mr_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
	if (mr->mr_device) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents,
				mr->mr_dir);
		mr->mr_device = NULL;
	}
}

/**
 * frwr_mr_release - Destroy one MR
 * @mr: MR allocated by frwr_mr_init
 *
 */
void frwr_mr_release(struct rpcrdma_mr *mr)
{
	int rc;

	frwr_mr_unmap(mr->mr_xprt, mr);

	rc = ib_dereg_mr(mr->mr_ibmr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

static void frwr_mr_put(struct rpcrdma_mr *mr)
{
	frwr_mr_unmap(mr->mr_xprt, mr);

	/* The MR is returned to the req's MR free list instead
	 * of to the xprt's MR free list. No spinlock is needed.
	 */
	rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
}

/* frwr_reset - Place MRs back on the free list
 * @req: request to reset
 *
 * Used after a failed marshal. For FRWR, this means the MRs
 * don't have to be fully released and recreated.
 *
 * NB: This is safe only as long as none of @req's MRs are
 * involved with an ongoing asynchronous FAST_REG or LOCAL_INV
 * Work Request.
 */
void frwr_reset(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
		frwr_mr_put(mr);
}

/**
 * frwr_mr_init - Initialize one MR
 * @r_xprt: controlling transport instance
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int depth = ep->re_max_fr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	sg = kmalloc_array(depth, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		goto out_list_err;

	mr->mr_xprt = r_xprt;
	mr->mr_ibmr = frmr;
	mr->mr_device = NULL;
	INIT_LIST_HEAD(&mr->mr_list);
	init_completion(&mr->mr_linv_done);
	frwr_cid_init(ep, mr);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	ib_dereg_mr(frmr);
	return -ENOMEM;
}

/**
 * frwr_query_device - Prepare a transport for use with FRWR
 * @ep: endpoint to fill in
 * @device: RDMA device to query
 *
 * On success, sets:
 *	ep->re_attr
 *	ep->re_max_requests
 *	ep->re_max_rdma_segs
 *	ep->re_max_fr_depth
 *	ep->re_mrtype
 *
 * Return values:
 *   On success, returns zero.
 *   %-EINVAL - the device does not support FRWR memory registration
 *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
 */
int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
{
	const struct ib_device_attr *attrs = &device->attrs;
	int max_qp_wr, depth, delta;
	unsigned int max_sge;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
	    attrs->max_fast_reg_page_list_len == 0) {
		pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
		       device->name);
		return -EINVAL;
	}

	max_sge = min_t(unsigned int, attrs->max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ep->re_attr.cap.max_send_sge = max_sge;
	ep->re_attr.cap.max_recv_sge = 1;

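	/* Prefer SG_GAPS MRs when the device supports them: they can
	 * register scatterlists whose elements need not be page-aligned.
	 */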
	ep->re_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->kernel_cap_flags & IBK_SG_GAPS_REG)
		ep->re_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_fr_depth = attrs->max_sge_rd;
	else
		ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len;
	if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)
		ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
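	/* For example (hypothetical numbers): if the device FRWR depth were
	 * 30 and RPCRDMA_MAX_DATA_SEGS were 256, delta would start at 226
	 * and the loop below would run eight times, adding 16 WRs for a
	 * final depth of 7 + 16 = 23.
	 */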
	if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ep->re_max_fr_depth;
		} while (delta > 0);
	}

	max_qp_wr = attrs->max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (ep->re_max_requests > max_qp_wr)
		ep->re_max_requests = max_qp_wr;
	ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	if (ep->re_attr.cap.max_send_wr > max_qp_wr) {
		ep->re_max_requests = max_qp_wr / depth;
		if (!ep->re_max_requests)
			return -ENOMEM;
		ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	}
	ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
	ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ep->re_max_rdma_segs =
		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ep->re_max_rdma_segs += 2;
	if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;

	/* Ensure the underlying device is capable of conveying the
	 * largest r/wsize NFS will ask for. This guarantees that
	 * failing over from one RDMA device to another will not
	 * break NFS I/O.
	 */
	if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS)
		return -ENOMEM;

	return 0;
}

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @mr: MR to fill in
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, @mr is filled in.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct ib_reg_wr *reg_wr;
	int i, n, dma_nents;
	struct ib_mr *ibmr;
	u8 key;

	if (nsegs > ep->re_max_fr_depth)
		nsegs = ep->re_max_fr_depth;
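	/* Build the MR's scatterlist from the next run of segments.
	 * Unless the device supports SG_GAPS registration, stop before
	 * a segment that would leave a gap in the MR's byte range.
	 */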
	for (i = 0; i < nsegs;) {
		sg_set_page(&mr->mr_sg[i], seg->mr_page,
			    seg->mr_len, seg->mr_offset);

		++seg;
		++i;
		if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
			continue;
		if ((i < nsegs && seg->mr_offset) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);
	mr->mr_nents = i;

	dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
				  mr->mr_dir);
	if (!dma_nents)
		goto out_dmamap_err;
	mr->mr_device = ep->re_id->device;

	ibmr = mr->mr_ibmr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
	if (n != dma_nents)
		goto out_mapmr_err;

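	/* Tag this registration: the RPC's XID occupies the upper 32 bits
	 * of the MR's iova, and the rkey's 8-bit key field is advanced so
	 * that each registration of this MR yields a distinct rkey.
	 */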
	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &mr->mr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	return seg;

out_dmamap_err:
	trace_xprtrdma_frwr_sgerr(mr, i);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	return ERR_PTR(-EIO);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue
 * @wc: WCE for a completed FastReg WR
 *
 * Each flushed MR gets destroyed after the QP has drained.
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_fastreg(wc, &mr->mr_cid);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_send - post Send WRs containing the RPC Call message
 * @r_xprt: controlling transport instance
 * @req: prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the return code from ib_post_send.
 *
 * Caller must hold the transport send lock to ensure that the
 * pointers to the transport's rdma_cm_id and QP are stable.
 */
int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr, *send_wr = &req->rl_wr;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rpcrdma_mr *mr;
	unsigned int num_wrs;
	int ret;

	num_wrs = 1;
	post_wr = send_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		trace_xprtrdma_mr_fastreg(mr);

		mr->mr_cqe.done = frwr_wc_fastreg;
		mr->mr_regwr.wr.next = post_wr;
		mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
		mr->mr_regwr.wr.num_sge = 0;
		mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
		mr->mr_regwr.wr.send_flags = 0;
		post_wr = &mr->mr_regwr.wr;
		++num_wrs;
	}

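	/* Signal this Send when some other path still holds a reference
	 * to the request or when the unsignaled-send budget (re_send_count)
	 * would be exceeded; otherwise post it unsignaled and charge the
	 * chained WRs against the budget.
	 */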
	if ((kref_read(&req->rl_kref) > 1) || num_wrs > ep->re_send_count) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->re_send_count = min_t(unsigned int, ep->re_send_batch,
					  num_wrs - ep->re_send_count);
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		ep->re_send_count -= num_wrs;
	}

	trace_xprtrdma_post_send(req);
	ret = ib_post_send(ep->re_id->qp, post_wr, NULL);
	if (ret)
		trace_xprtrdma_post_send_err(r_xprt, req, ret);
	return ret;
}

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_reminv(mr);
			frwr_mr_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

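/* A successfully invalidated MR can be recycled immediately. Flushed
 * MRs are not put here; they are destroyed after the QP has drained.
 */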
static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
	if (likely(wc->status == IB_WC_SUCCESS))
		frwr_mr_put(mr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li(wc, &mr->mr_cid);
	frwr_mr_done(wc, mr);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_wake(wc, &mr->mr_cid);
	frwr_mr_done(wc, mr);
	complete(&mr->mr_linv_done);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously mapped
 * memory regions. This guarantees that registered MRs are properly fenced
 * from the server before the RPC consumer accesses the data in them. It
 * also ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, **prev, *last;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	prev = &first;
	mr = rpcrdma_mr_pop(&req->rl_registered);
	do {
		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		last = &mr->mr_invwr;
		last->next = NULL;
		last->wr_cqe = &mr->mr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		last->wr_cqe->done = frwr_wc_localinv;

		*prev = last;
		prev = &last->next;
	} while ((mr = rpcrdma_mr_pop(&req->rl_registered)));

	mr = container_of(last, struct rpcrdma_mr, mr_invwr);

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	last->wr_cqe->done = frwr_wc_localinv_wake;
	reinit_completion(&mr->mr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(ep->re_id->qp, first, &bad_wr);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so don't wait in that case.
	 */
	if (bad_wr != first)
		wait_for_completion(&mr->mr_linv_done);
	if (!rc)
		return;

	/* On error, the MRs get destroyed once the QP has drained. */
	trace_xprtrdma_post_linv_err(req, rc);

	/* Force a connection loss to ensure complete recovery.
	 */
	rpcrdma_force_disconnect(ep);
}

/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
	struct rpcrdma_rep *rep;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_done(wc, &mr->mr_cid);

	/* Ensure that @rep is generated before the MR is released */
	rep = mr->mr_req->rl_reply;
	smp_rmb();

	if (wc->status != IB_WC_SUCCESS) {
		if (rep)
			rpcrdma_unpin_rqst(rep);
		rpcrdma_flush_disconnect(cq->cq_context, wc);
		return;
	}
	frwr_mr_put(mr);
	rpcrdma_complete_rqst(rep);
}

/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, *last, **prev;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	prev = &first;
	mr = rpcrdma_mr_pop(&req->rl_registered);
	do {
		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		last = &mr->mr_invwr;
		last->next = NULL;
		last->wr_cqe = &mr->mr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		last->wr_cqe->done = frwr_wc_localinv;

		*prev = last;
		prev = &last->next;
	} while ((mr = rpcrdma_mr_pop(&req->rl_registered)));

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete. The last completion will wake up the
	 * RPC waiter.
	 */
	last->wr_cqe->done = frwr_wc_localinv_done;

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	rc = ib_post_send(ep->re_id->qp, first, NULL);
	if (!rc)
		return;

	/* On error, the MRs get destroyed once the QP has drained. */
	trace_xprtrdma_post_linv_err(req, rc);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake does
	 * not happen. Unpin the rqst in preparation for its
	 * retransmission.
	 */
	rpcrdma_unpin_rqst(req->rl_reply);

	/* Force a connection loss to ensure complete recovery.
	 */
	rpcrdma_force_disconnect(ep);
}

/**
 * frwr_wp_create - Create an MR for padding Write chunks
 * @r_xprt: transport resources to use
 *
 * Return 0 on success, negative errno on failure.
 */
int frwr_wp_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rpcrdma_mr_seg seg;
	struct rpcrdma_mr *mr;

	mr = rpcrdma_mr_get(r_xprt);
	if (!mr)
		return -EAGAIN;
	mr->mr_req = NULL;
	ep->re_write_pad_mr = mr;

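	/* Register a single XDR_UNIT at the endpoint's write pad buffer.
	 * This gives the server a place to put XDR roundup bytes when a
	 * Write chunk's payload length is not XDR-aligned.
	 */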
	seg.mr_len = XDR_UNIT;
	seg.mr_page = virt_to_page(ep->re_write_pad);
	seg.mr_offset = offset_in_page(ep->re_write_pad);
	if (IS_ERR(frwr_map(r_xprt, &seg, 1, true, xdr_zero, mr)))
		return -EIO;
	trace_xprtrdma_mr_fastreg(mr);

	mr->mr_cqe.done = frwr_wc_fastreg;
	mr->mr_regwr.wr.next = NULL;
	mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
	mr->mr_regwr.wr.num_sge = 0;
	mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
	mr->mr_regwr.wr.send_flags = 0;

	return ib_post_send(ep->re_id->qp, &mr->mr_regwr.wr, NULL);
}