// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 *
 * Neither the name of the Network Appliance, Inc. nor the names of
 * its contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * The ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

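/* Assign a distinct completion ID to this Send, so that tracepoints
 * can match a Send completion to the WR that was posted.
 */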
static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

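/* Allocate a Send context and a transport header buffer. The header
 * buffer is DMA-mapped here and stays mapped until the context is
 * released by svc_rdma_send_ctxts_destroy().
 */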
static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	init_completion(&ctxt->sc_done);
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
		     rdma->sc_max_req_size);
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_send_ctxts)) != NULL) {
		ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	struct llist_node *node;

	spin_lock(&rdma->sc_send_lock);
	node = llist_del_first(&rdma->sc_send_ctxts);
	if (!node)
		goto out_empty;
	ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
	spin_unlock(&rdma->sc_send_lock);

out:
	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
			ctxt->sc_xprt_buf, NULL);

	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

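	/* llist_add() is safe without sc_send_lock; concurrent
	 * removals in svc_rdma_send_ctxt_get() take the lock.
	 */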
	llist_add(&ctxt->sc_node, &rdma->sc_send_ctxts);
}

/**
 * svc_rdma_wake_send_waiters - manage Send Queue accounting
 * @rdma: controlling transport
 * @avail: Number of additional SQEs that are now available
 *
 */
void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail)
{
	atomic_add(avail, &rdma->sc_sq_avail);
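	/* Make the updated SQ space visible before checking for
	 * waiters, so that a concurrent waiter does not miss the
	 * wakeup.
	 */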
	smp_mb__after_atomic();
	if (unlikely(waitqueue_active(&rdma->sc_send_wait)))
		wake_up(&rdma->sc_send_wait);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt =
		container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);

	svc_rdma_wake_send_waiters(rdma, 1);
	complete(&ctxt->sc_done);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		goto flushed;

	trace_svcrdma_wc_send(wc, &ctxt->sc_cid);
	return;

flushed:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		trace_svcrdma_wc_send_err(wc, &ctxt->sc_cid);
	else
		trace_svcrdma_wc_send_flush(wc, &ctxt->sc_cid);
	svc_xprt_deferred_close(&rdma->sc_xprt);
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @ctxt: send ctxt with a Send WR ready to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_send_wr *wr = &ctxt->sc_send_wr;
	int ret;

	reinit_completion(&ctxt->sc_done);

	/* Sync the transport header buffer */
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      wr->sg_list[0].addr,
				      wr->sg_list[0].length,
				      DMA_TO_DEVICE);

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			percpu_counter_inc(&svcrdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		trace_svcrdma_post_send(ctxt);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		if (ret)
			break;
		return 0;
	}

	trace_svcrdma_sq_post_err(rdma, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);
	wake_up(&rdma->sc_send_wait);
	return ret;
}

/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 * On success, returns length in bytes of the Reply XDR buffer
 * that was consumed by the Reply Read list
 * %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
	/* RPC-over-RDMA version 1 replies never have a Read list. */
	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}

/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 * @remaining: remaining bytes of the payload left in the Write chunk
 * @segno: which segment in the chunk
 *
 * Return values:
 * On success, returns length in bytes of the Reply XDR buffer
 * that was consumed by the Write segment, and updates @remaining
 * %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(struct svc_rdma_send_ctxt *sctxt,
					     const struct svc_rdma_chunk *chunk,
					     u32 *remaining, unsigned int segno)
{
	const struct svc_rdma_segment *segment = &chunk->ch_segments[segno];
	const size_t len = rpcrdma_segment_maxsz * sizeof(__be32);
	u32 length;
	__be32 *p;

	p = xdr_reserve_space(&sctxt->sc_stream, len);
	if (!p)
		return -EMSGSIZE;

	length = min_t(u32, *remaining, segment->rs_length);
	*remaining -= length;
	xdr_encode_rdma_segment(p, segment->rs_handle, length,
				segment->rs_offset);
	trace_svcrdma_encode_wseg(sctxt, segno, segment->rs_handle, length,
				  segment->rs_offset);
	return len;
}

/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 * On success, returns length in bytes of the Reply XDR buffer
 * that was consumed by the Write chunk
 * %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(struct svc_rdma_send_ctxt *sctxt,
					   const struct svc_rdma_chunk *chunk)
{
	u32 remaining = chunk->ch_payload_length;
	unsigned int segno;
	ssize_t len, ret;

	len = 0;
	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
	if (ret < 0)
		return ret;
	len += ret;

	ret = xdr_stream_encode_u32(&sctxt->sc_stream, chunk->ch_segcount);
	if (ret < 0)
		return ret;
	len += ret;

	for (segno = 0; segno < chunk->ch_segcount; segno++) {
		ret = svc_rdma_encode_write_segment(sctxt, chunk, &remaining, segno);
		if (ret < 0)
			return ret;
		len += ret;
	}

	return len;
}

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 * On success, returns length in bytes of the Reply XDR buffer
 * that was consumed by the Reply's Write list
 * %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_list(struct svc_rdma_recv_ctxt *rctxt,
					  struct svc_rdma_send_ctxt *sctxt)
{
	struct svc_rdma_chunk *chunk;
	ssize_t len, ret;

	len = 0;
	pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
		ret = svc_rdma_encode_write_chunk(sctxt, chunk);
		if (ret < 0)
			return ret;
		len += ret;
	}

	/* Terminate the Write list */
	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
	if (ret < 0)
		return ret;

	return len + ret;
}

/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Return values:
 * On success, returns length in bytes of the Reply XDR buffer
 * that was consumed by the Reply's Reply chunk
 * %-EMSGSIZE on XDR buffer overflow
 * %-E2BIG if the RPC message is larger than the Reply chunk
 */
static ssize_t
svc_rdma_encode_reply_chunk(struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_send_ctxt *sctxt,
			    unsigned int length)
{
	struct svc_rdma_chunk *chunk;

	if (pcl_is_empty(&rctxt->rc_reply_pcl))
		return xdr_stream_encode_item_absent(&sctxt->sc_stream);

	chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_payload_length = length;
	return svc_rdma_encode_write_chunk(sctxt, chunk);
}

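/* Arguments handed through pcl_process_nonpayloads() to the
 * per-page DMA-mapping callbacks below.
 */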
struct svc_rdma_map_data {
	struct svcxprt_rdma *md_rdma;
	struct svc_rdma_send_ctxt *md_ctxt;
};

/**
 * svc_rdma_page_dma_map - DMA map one page
 * @data: pointer to arguments
 * @page: struct page to DMA map
 * @offset: offset into the page
 * @len: number of bytes to map
 *
 * Returns:
 * %0 if DMA mapping was successful
 * %-EIO if the page cannot be DMA mapped
 */
static int svc_rdma_page_dma_map(void *data, struct page *page,
				 unsigned long offset, unsigned int len)
{
	struct svc_rdma_map_data *args = data;
	struct svcxprt_rdma *rdma = args->md_rdma;
	struct svc_rdma_send_ctxt *ctxt = args->md_ctxt;
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	++ctxt->sc_cur_sge_no;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	trace_svcrdma_dma_map_err(rdma, dma_addr, len);
	return -EIO;
}

/**
 * svc_rdma_iov_dma_map - DMA map an iovec
 * @data: pointer to arguments
 * @iov: kvec to DMA map
 *
 * ib_dma_map_page() is used here because svc_rdma_send_ctxt_put()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 *
 * Returns:
 * %0 if DMA mapping was successful
 * %-EIO if the iovec cannot be DMA mapped
 */
static int svc_rdma_iov_dma_map(void *data, const struct kvec *iov)
{
	if (!iov->iov_len)
		return 0;
	return svc_rdma_page_dma_map(data, virt_to_page(iov->iov_base),
				     offset_in_page(iov->iov_base),
				     iov->iov_len);
}

/**
 * svc_rdma_xb_dma_map - DMA map all segments of an xdr_buf
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 * %0 if DMA mapping was successful
 * %-EIO if DMA mapping failed
 *
 * On failure, any DMA mappings that have been already done must be
 * unmapped by the caller.
 */
static int svc_rdma_xb_dma_map(const struct xdr_buf *xdr, void *data)
{
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;
	int ret;

	ret = svc_rdma_iov_dma_map(data, &xdr->head[0]);
	if (ret < 0)
		return ret;

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);

		ret = svc_rdma_page_dma_map(data, *ppages++, pageoff, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		pageoff = 0;
	}

	ret = svc_rdma_iov_dma_map(data, &xdr->tail[0]);
	if (ret < 0)
		return ret;

	return xdr->len;
}

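/* Accumulator used when counting the SGEs a Reply would need and
 * when copying (pulling up) the Reply into the header buffer.
 */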
struct svc_rdma_pullup_data {
	u8 *pd_dest;
	unsigned int pd_length;
	unsigned int pd_num_sges;
};

/**
 * svc_rdma_xb_count_sges - Count how many SGEs will be needed
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 * Number of SGEs needed to Send the contents of @xdr inline
 */
static int svc_rdma_xb_count_sges(const struct xdr_buf *xdr,
				  void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int remaining;
	unsigned long offset;

	if (xdr->head[0].iov_len)
		++args->pd_num_sges;

	offset = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		++args->pd_num_sges;
		remaining -= min_t(u32, PAGE_SIZE - offset, remaining);
		offset = 0;
	}

	if (xdr->tail[0].iov_len)
		++args->pd_num_sges;

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 * %true if pull-up must be used
 * %false otherwise
 */
static bool svc_rdma_pull_up_needed(const struct svcxprt_rdma *rdma,
				    const struct svc_rdma_send_ctxt *sctxt,
				    const struct svc_rdma_recv_ctxt *rctxt,
				    const struct xdr_buf *xdr)
{
	/* Resources needed for the transport header */
	struct svc_rdma_pullup_data args = {
		.pd_length = sctxt->sc_hdrbuf.len,
		.pd_num_sges = 1,
	};
	int ret;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_count_sges, &args);
	if (ret < 0)
		return false;

	if (args.pd_length < RPCRDMA_PULLUP_THRESH)
		return true;
	return args.pd_num_sges >= rdma->sc_max_send_sges;
}

/**
 * svc_rdma_xb_linearize - Copy region of xdr_buf to flat buffer
 * @xdr: xdr_buf containing portion of an RPC message to copy
 * @data: pointer to arguments
 *
 * Returns:
 * Always zero.
 */
static int svc_rdma_xb_linearize(const struct xdr_buf *xdr,
				 void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;

	if (xdr->head[0].iov_len) {
		memcpy(args->pd_dest, xdr->head[0].iov_base, xdr->head[0].iov_len);
		args->pd_dest += xdr->head[0].iov_len;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);
		memcpy(args->pd_dest, page_address(*ppages) + pageoff, len);
		remaining -= len;
		args->pd_dest += len;
		pageoff = 0;
		ppages++;
	}

	if (xdr->tail[0].iov_len) {
		memcpy(args->pd_dest, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
		args->pd_dest += xdr->tail[0].iov_len;
	}

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Assumptions:
 *  pull_up_needed has determined that @xdr will fit in the buffer.
 *
 * Returns:
 * %0 if pull-up was successful
 * %-EMSGSIZE if a buffer manipulation problem occurred
 */
static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_recv_ctxt *rctxt,
				      const struct xdr_buf *xdr)
{
	struct svc_rdma_pullup_data args = {
		.pd_dest = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len,
	};
	int ret;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_linearize, &args);
	if (ret < 0)
		return ret;

	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len + args.pd_length;
	trace_svcrdma_send_pullup(sctxt, args.pd_length);
	return 0;
}

/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Returns:
 * %0 if DMA mapping was successful.
 * %-EMSGSIZE if a buffer manipulation problem occurred
 * %-EIO if DMA mapping failed
 *
 * The Send WR's num_sge field is set in all cases.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_recv_ctxt *rctxt,
			   const struct xdr_buf *xdr)
{
	struct svc_rdma_map_data args = {
		.md_rdma = rdma,
		.md_ctxt = sctxt,
	};

	/* Set up the (persistently-mapped) transport header SGE. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

	/* If there is a Reply chunk, nothing follows the transport
	 * header, and we're done here.
	 */
	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
		return 0;

	/* For pull-up, svc_rdma_send() will sync the transport header.
	 * No additional DMA mapping is necessary.
	 */
	if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
		return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);

	return pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				       svc_rdma_xb_dma_map, &args);
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   const struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp)
{
	int ret;

	ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
	if (ret < 0)
		return ret;

	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}

	ret = svc_rdma_send(rdma, sctxt);
	if (ret < 0)
		return ret;

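	/* Wait for the posted Send to complete before releasing the
	 * Send context.
	 */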
	ret = wait_for_completion_killable(&sctxt->sc_done);
	svc_rdma_send_ctxt_put(rdma, sctxt);
	return ret;
}

/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for incoming bad message
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was not able to parse the Call or form a complete Reply.
 * Return an RDMA_ERROR message so the client can retire the RPC
 * transaction.
 *
 * The caller does not have to release @sctxt. This function releases
 * it, either after the Send has completed or on error.
 */
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *sctxt,
			     struct svc_rdma_recv_ctxt *rctxt,
			     int status)
{
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *p;

	rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
	xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
			sctxt->sc_xprt_buf, NULL);

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rdma_error;

	switch (status) {
	case -EPROTONOSUPPORT:
		p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}

	/* Remote Invalidation is skipped for simplicity. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_send_wr.opcode = IB_WR_SEND;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
	if (svc_rdma_send(rdma, sctxt))
		goto put_ctxt;

	wait_for_completion_killable(&sctxt->sc_done);

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
}

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 * %0 if an RPC reply has been successfully posted,
 * %-ENOMEM if a resource shortage occurred (connection is lost),
 * %-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	struct svc_rdma_send_ctxt *sctxt;
	unsigned int rc_size;
	__be32 *p;
	int ret;

	ret = -ENOTCONN;
	if (svc_xprt_is_dead(xprt))
		goto drop_connection;

	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto drop_connection;

	ret = -EMSGSIZE;
	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
	if (ret < 0)
		goto reply_chunk;
	rc_size = ret;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg;

	ret = svc_rdma_encode_read_list(sctxt);
	if (ret < 0)
		goto put_ctxt;
	ret = svc_rdma_encode_write_list(rctxt, sctxt);
	if (ret < 0)
		goto put_ctxt;
	ret = svc_rdma_encode_reply_chunk(rctxt, sctxt, rc_size);
	if (ret < 0)
		goto put_ctxt;

	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
	if (ret < 0)
		goto put_ctxt;

	/* Prevent svc_xprt_release() from releasing the page backing
	 * rq_res.head[0].iov_base. It's no longer being accessed by
	 * the I/O device. */
	rqstp->rq_respages++;
	return 0;

reply_chunk:
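	/* -E2BIG or -EINVAL means the client's Reply chunk cannot hold
	 * this Reply: respond with an RDMA_ERROR message. Any other
	 * error drops the connection.
	 */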
	if (ret != -E2BIG && ret != -EINVAL)
		goto put_ctxt;

	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
	return 0;

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
drop_connection:
	trace_svcrdma_send_err(rqstp, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);
	return -ENOTCONN;
}

/**
 * svc_rdma_result_payload - special processing for a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Return values:
 * %0 if successful or nothing needed to be done
 * %-EMSGSIZE on XDR buffer overflow
 * %-E2BIG if the payload was larger than the Write chunk
 * %-EINVAL if client provided too many segments
 * %-ENOMEM if rdma_rw context pool was exhausted
 * %-ENOTCONN if posting failed (connection is lost)
 * %-EIO if rdma_rw initialization failed (DMA mapping, etc)
 */
int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			    unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	struct svc_rdma_chunk *chunk;
	struct svcxprt_rdma *rdma;
	struct xdr_buf subbuf;
	int ret;

	chunk = rctxt->rc_cur_result_payload;
	if (!length || !chunk)
		return 0;
	rctxt->rc_cur_result_payload =
		pcl_next_chunk(&rctxt->rc_write_pcl, chunk);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_position = offset;
	chunk->ch_payload_length = length;

	if (xdr_buf_subsegment(&rqstp->rq_res, &subbuf, offset, length))
		return -EMSGSIZE;

	rdma = container_of(rqstp->rq_xprt, struct svcxprt_rdma, sc_xprt);
	ret = svc_rdma_send_write_chunk(rdma, chunk, &subbuf);
	if (ret < 0)
		return ret;
	return 0;
}