/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_LEN		(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN		(ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
				 ISCSI_ISER_MAX_CONN)

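/*
 * Log unexpected QP asynchronous events; no recovery is attempted here,
 * error handling is driven by CM events and completion statuses.
 */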
static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("qp event %s (%d)\n",
		 ib_event_msg(cause->event), cause->event);
}

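/* Log device-wide asynchronous events (e.g. port state changes). */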
static void iser_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	iser_err("async event %s (%d) on device %s port %d\n",
		 ib_event_msg(event->event), event->event,
		 dev_name(&event->device->dev), event->element.port_num);
}

/*
 * iser_create_device_ib_res - allocates a Protection Domain (PD) and
 * registers an async event handler with the device associated with
 * the adaptor.
 *
 * Return: 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct ib_device *ib_dev = device->ib_device;

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
		iser_err("IB device does not support memory registrations\n");
		return -1;
	}

	device->pd = ib_alloc_pd(ib_dev,
		iser_always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
	if (IS_ERR(device->pd))
		goto pd_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
			      iser_event_handler);
	ib_register_event_handler(&device->event_handler);
	return 0;

pd_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/*
 * iser_free_device_ib_res - unregisters the async event handler and
 * deallocates the PD created with the device associated with the adaptor.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	ib_unregister_event_handler(&device->event_handler);
	ib_dealloc_pd(device->pd);

	device->pd = NULL;
}

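/**
 * iser_create_fastreg_desc - allocate a single fast registration descriptor
 * @device: iser device
 * @pd: protection domain to allocate the MRs on
 * @pi_enable: also allocate an integrity MR for T10-PI
 * @size: max number of pages per registration
 *
 * Return: allocated descriptor, or ERR_PTR on failure
 */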
static struct iser_fr_desc *
iser_create_fastreg_desc(struct iser_device *device,
			 struct ib_pd *pd,
			 bool pi_enable,
			 unsigned int size)
{
	struct iser_fr_desc *desc;
	struct ib_device *ib_dev = device->ib_device;
	enum ib_mr_type mr_type;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
		mr_type = IB_MR_TYPE_SG_GAPS;
	else
		mr_type = IB_MR_TYPE_MEM_REG;

	desc->rsc.mr = ib_alloc_mr(pd, mr_type, size);
	if (IS_ERR(desc->rsc.mr)) {
		ret = PTR_ERR(desc->rsc.mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		goto err_alloc_mr;
	}

	if (pi_enable) {
		desc->rsc.sig_mr = ib_alloc_mr_integrity(pd, size, size);
		if (IS_ERR(desc->rsc.sig_mr)) {
			ret = PTR_ERR(desc->rsc.sig_mr);
			iser_err("Failed to allocate sig_mr err=%d\n", ret);
			goto err_alloc_mr_integrity;
		}
	}
	desc->rsc.mr_valid = 0;

	return desc;

err_alloc_mr_integrity:
	ib_dereg_mr(desc->rsc.mr);
err_alloc_mr:
	kfree(desc);

	return ERR_PTR(ret);
}

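/* Counterpart of iser_create_fastreg_desc: deregister the MRs and free. */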
static void iser_destroy_fastreg_desc(struct iser_fr_desc *desc)
{
	struct iser_reg_resources *res = &desc->rsc;

	ib_dereg_mr(res->mr);
	if (res->sig_mr) {
		ib_dereg_mr(res->sig_mr);
		res->sig_mr = NULL;
	}
	kfree(desc);
}

/**
 * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 * @ib_conn: connection RDMA resources
 * @cmds_max: max number of SCSI commands for this connection
 * @size: max number of pages per map request
 *
 * Return: 0 on success, or errno code on failure
 */
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
			    unsigned cmds_max,
			    unsigned int size)
{
	struct iser_device *device = ib_conn->device;
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;
	int i, ret;

	INIT_LIST_HEAD(&fr_pool->list);
	INIT_LIST_HEAD(&fr_pool->all_list);
	spin_lock_init(&fr_pool->lock);
	fr_pool->size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = iser_create_fastreg_desc(device, device->pd,
						ib_conn->pi_support, size);
		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto err;
		}

		list_add_tail(&desc->list, &fr_pool->list);
		list_add_tail(&desc->all_list, &fr_pool->all_list);
		fr_pool->size++;
	}

	return 0;

err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}
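
/*
 * A minimal usage sketch (illustrative only, not a call site in this file):
 * size the pool by the session queue depth, with the per-MR page budget
 * computed earlier by iser_calc_scsi_params():
 *
 *	err = iser_alloc_fastreg_pool(ib_conn, cmds_max,
 *				      iser_conn->pages_per_mr);
 *	if (err)
 *		return err;
 *
 * Note that on a mid-loop allocation failure the partially built pool is
 * already torn down via iser_free_fastreg_pool(), so callers only need to
 * propagate the errno.
 */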

/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 * @ib_conn: connection RDMA resources
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc, *tmp;
	int i = 0;

	if (list_empty(&fr_pool->all_list))
		return;

	iser_info("freeing conn %p fr pool\n", ib_conn);

	list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
		list_del(&desc->all_list);
		iser_destroy_fastreg_desc(desc);
		++i;
	}

	if (i < fr_pool->size)
		iser_warn("pool still has %d regions registered\n",
			  fr_pool->size - i);
}

/*
 * iser_create_ib_conn_res - creates the Queue-Pair (QP) and gets a
 * Completion Queue for the connection from the CQ pool.
 *
 * Return: 0 on success, errno code on failure
 */
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_device *device;
	struct ib_device *ib_dev;
	struct ib_qp_init_attr init_attr;
	int ret = -ENOMEM;
	unsigned int max_send_wr, cq_size;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;
	ib_dev = device->ib_device;

	/* +1 for drain */
	if (ib_conn->pi_support)
		max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
	else
		max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
	max_send_wr = min_t(unsigned int, max_send_wr,
			    (unsigned int)ib_dev->attrs.max_qp_wr);

	cq_size = max_send_wr + ISER_QP_MAX_RECV_DTOS;
	ib_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);
	if (IS_ERR(ib_conn->cq)) {
		ret = PTR_ERR(ib_conn->cq);
		goto cq_err;
	}
	ib_conn->cq_size = cq_size;

	memset(&init_attr, 0, sizeof(init_attr));

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context = (void *)ib_conn;
	init_attr.send_cq = ib_conn->cq;
	init_attr.recv_cq = ib_conn->cq;
	/* +1 for drain */
	init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS + 1;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr.qp_type = IB_QPT_RC;
	init_attr.cap.max_send_wr = max_send_wr;
	if (ib_conn->pi_support)
		init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
	iser_conn->max_cmds = ISER_GET_MAX_XMIT_CMDS(max_send_wr - 1);

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n", ib_conn,
		  ib_conn->cma_id, ib_conn->cma_id->qp, max_send_wr);
	return ret;

out_err:
	ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
cq_err:
	iser_err("unable to alloc mem or create resource, err %d\n", ret);

	return ret;
}

/*
 * Based on the resolved device node GUID, see if there is an already
 * allocated device for this IB device. If there is no such device,
 * create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (!device)
		goto out;

	/* associate the IB device with the iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

/*
 * Move the connection state to @exch if it currently equals @comp.
 * Returns 1 if the exchange took place, 0 otherwise.
 *
 * Called with state mutex held
 */
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
				     enum iser_conn_state comp,
				     enum iser_conn_state exch)
{
	int ret;

	ret = (iser_conn->state == comp);
	if (ret)
		iser_conn->state = exch;

	return ret;
}

void iser_release_work(struct work_struct *work)
{
	struct iser_conn *iser_conn;

	iser_conn = container_of(work, struct iser_conn, release_work);

	/* Wait for conn_stop to complete */
	wait_for_completion(&iser_conn->stop_completion);
	/* Wait for IB resources cleanup to complete */
	wait_for_completion(&iser_conn->ib_completion);

	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);

	iser_conn_release(iser_conn);
}

/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy: indicator if we need to try to release the
 *           iser device and memory regions pool (only iscsi
 *           shutdown and DEVICE_REMOVAL will use this).
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn, bool destroy)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	if (ib_conn->qp) {
		rdma_destroy_qp(ib_conn->cma_id);
		ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
		ib_conn->qp = NULL;
	}

	if (destroy) {
		if (iser_conn->rx_descs)
			iser_free_rx_descriptors(iser_conn);

		if (device) {
			iser_device_try_release(device);
			ib_conn->device = NULL;
		}
	}
}

/**
 * iser_conn_release - Frees all conn objects and deallocs conn descriptor
 * @iser_conn: iSER connection context
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	mutex_lock(&ig.connlist_mutex);
	list_del(&iser_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);

	mutex_lock(&iser_conn->state_mutex);
	/* In case we end up here without ep_disconnect being invoked. */
	if (iser_conn->state != ISER_CONN_DOWN) {
		iser_warn("iser conn %p state %d, expected state down.\n",
			  iser_conn, iser_conn->state);
		iscsi_destroy_endpoint(iser_conn->ep);
		iser_conn->state = ISER_CONN_DOWN;
	}
	/*
	 * In case we never got to bind stage, we still need to
	 * release IB resources (which is safe to call more than once).
	 */
	iser_free_ib_conn_res(iser_conn, true);
	mutex_unlock(&iser_conn->state_mutex);

	if (ib_conn->cma_id) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}

	kfree(iser_conn);
}

/**
 * iser_conn_terminate - triggers start of the disconnect procedures and
 * waits for them to be done
 * @iser_conn: iSER connection context
 *
 * Called with state mutex held
 */
int iser_conn_terminate(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	/* terminate the iser conn only if the conn state is UP */
	if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				       ISER_CONN_TERMINATING))
		return 0;

	iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

	/* suspend queuing of new iscsi commands */
	if (iser_conn->iscsi_conn)
		iscsi_suspend_queue(iser_conn->iscsi_conn);

	/*
	 * In case we didn't already clean up the cma_id (peer initiated
	 * a disconnection), we need to cause the CMA to change the QP
	 * state to ERROR.
	 */
	if (ib_conn->cma_id) {
		err = rdma_disconnect(ib_conn->cma_id);
		if (err)
			iser_err("Failed to disconnect, conn: 0x%p err %d\n",
				 iser_conn, err);

		/* block until all flush errors are consumed */
		ib_drain_qp(ib_conn->qp);
	}

	return 1;
}

/*
 * Called with state mutex held
 */
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;

	iser_conn = cma_id->context;
	iser_conn->state = ISER_CONN_TERMINATING;
}

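/*
 * Derive the connection's scsi_sg_tablesize and pages_per_mr from the
 * requested max_sectors, clamped by what the device can map with a single
 * fast registration. A worked example, assuming 512-byte sectors, 4K pages
 * and no SG_GAPS support: max_sectors = 1024 (512KB I/O) gives
 * sg_tablesize = DIV_ROUND_UP(1024 * 512, 4096) = 128 entries, and with
 * the one reserved entry below, pages_per_mr = 129.
 */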
static void iser_calc_scsi_params(struct iser_conn *iser_conn,
				  unsigned int max_sectors)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	struct ib_device_attr *attr = &device->ib_device->attrs;
	unsigned short sg_tablesize, sup_sg_tablesize;
	unsigned short reserved_mr_pages;
	u32 max_num_sg;

	/*
	 * FRs without SG_GAPS can only map up to a (device) page per entry,
	 * but if the first entry is misaligned we'll end up using two entries
	 * (head and tail) for a single page worth of data, so one additional
	 * entry is required.
	 */
	if (attr->kernel_cap_flags & IBK_SG_GAPS_REG)
		reserved_mr_pages = 0;
	else
		reserved_mr_pages = 1;

	if (iser_conn->ib_conn.pi_support)
		max_num_sg = attr->max_pi_fast_reg_page_list_len;
	else
		max_num_sg = attr->max_fast_reg_page_list_len;

	sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);
	sup_sg_tablesize = min_t(uint, ISCSI_ISER_MAX_SG_TABLESIZE,
				 max_num_sg - reserved_mr_pages);
	iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
	iser_conn->pages_per_mr =
		iser_conn->scsi_sg_tablesize + reserved_mr_pages;
}

/*
 * Called with state mutex held
 */
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn *iser_conn;
	struct ib_conn *ib_conn;
	int ret;

	iser_conn = cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ib_conn = &iser_conn->ib_conn;
	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn->device = device;

	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->ib_device->attrs.kernel_cap_flags &
		      IBK_INTEGRITY_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  dev_name(&ib_conn->device->ib_device->dev));
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}

	iser_calc_scsi_params(iser_conn, iser_max_sectors);

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}

/*
 * Called with state mutex held
 */
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_device *ib_dev = ib_conn->device->ib_device;

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ret = iser_create_ib_conn_res(ib_conn);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom;
	conn_param.initiator_depth = 1;
	conn_param.retry_count = 7;
	conn_param.rnr_retry_count = 6;

	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = ISER_ZBVA_NOT_SUP;
	if (!iser_always_reg)
		req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP;
	conn_param.private_data = (void *)&req_hdr;
	conn_param.private_data_len = sizeof(struct iser_cm_hdr);

	ret = rdma_connect_locked(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;
failure:
	iser_connect_error(cma_id);
}

static void iser_connected_handler(struct rdma_cm_id *cma_id,
				   const void *private_data)
{
	struct iser_conn *iser_conn;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	iser_conn = cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);

	if (private_data) {
		u8 flags = *(u8 *)private_data;

		iser_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP);
	}

	iser_info("conn %p: negotiated %s invalidation\n",
		  iser_conn, iser_conn->snd_w_inv ? "remote" : "local");

	iser_conn->state = ISER_CONN_UP;
	complete(&iser_conn->up_completion);
}

static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = cma_id->context;

	if (iser_conn_terminate(iser_conn)) {
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}
}

static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy)
{
	struct iser_conn *iser_conn = cma_id->context;

	/*
	 * We are not guaranteed that we visited disconnected_handler
	 * by now, call it here to be safe that we handle CM DREP
	 * and flush errors.
	 */
	iser_disconnected_handler(cma_id);
	iser_free_ib_conn_res(iser_conn, destroy);
	complete(&iser_conn->ib_completion);
}

static int iser_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;
	int ret = 0;

	iser_conn = cma_id->context;
	iser_info("%s (%d): status %d conn %p id %p\n",
		  rdma_event_msg(event->event), event->event,
		  event->status, cma_id->context, cma_id);

	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id, event->param.conn.private_data);
		break;
	case RDMA_CM_EVENT_REJECTED:
		iser_info("Connection rejected: %s\n",
			  rdma_reject_msg(cma_id, event->status));
		fallthrough;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_cleanup_handler(cma_id, false);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * We *must* destroy the device as we cannot rely
		 * on iscsid to be around to initiate error handling.
		 * Also, if we are not in state DOWN, implicitly destroy
		 * the cma_id.
		 */
		iser_cleanup_handler(cma_id, true);
		if (iser_conn->state != ISER_CONN_DOWN) {
			iser_conn->ib_conn.cma_id = NULL;
			ret = 1;
		}
		break;
	default:
		iser_err("Unexpected RDMA CM event: %s (%d)\n",
			 rdma_event_msg(event->event), event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);

	return ret;
}

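/**
 * iser_conn_init - initialize a newly allocated iSER connection
 * @iser_conn: iSER connection context
 *
 * Sets the initial state and initializes the completions, list head and
 * state mutex used over the connection lifetime; must run before
 * iser_connect().
 */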
void iser_conn_init(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	iser_conn->state = ISER_CONN_INIT;
	init_completion(&iser_conn->stop_completion);
	init_completion(&iser_conn->ib_completion);
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
	mutex_init(&iser_conn->state_mutex);

	ib_conn->reg_cqe.done = iser_reg_comp;
}

/*
 * Starts the process of connecting to the target; unless non_blocking is
 * set, sleeps until the connection is established or rejected.
 */
int iser_connect(struct iser_conn *iser_conn, struct sockaddr *src_addr,
		 struct sockaddr *dst_addr, int non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	mutex_lock(&iser_conn->state_mutex);

	sprintf(iser_conn->name, "%pISp", dst_addr);

	iser_info("connecting to: %s\n", iser_conn->name);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_conn->state = ISER_CONN_PENDING;

	ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
					 iser_conn, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_for_completion_interruptible(&iser_conn->up_completion);

		if (iser_conn->state != ISER_CONN_UP) {
			err = -EIO;
			goto connect_failure;
		}
	}
	mutex_unlock(&iser_conn->state_mutex);

	mutex_lock(&ig.connlist_mutex);
	list_add(&iser_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	iser_conn->state = ISER_CONN_DOWN;
connect_failure:
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
	return err;
}

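/**
 * iser_post_recvl - post a receive buffer for the login response
 * @iser_conn: iSER connection context
 *
 * Return: 0 on success, or errno code on posting failure
 */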
int iser_post_recvl(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_login_desc *desc = &iser_conn->login_desc;
	struct ib_recv_wr wr;
	int ret;

	desc->sge.addr = desc->rsp_dma;
	desc->sge.length = ISER_RX_LOGIN_SIZE;
	desc->sge.lkey = ib_conn->device->pd->local_dma_lkey;

	desc->cqe.done = iser_login_rsp;
	wr.wr_cqe = &desc->cqe;
	wr.sg_list = &desc->sge;
	wr.num_sge = 1;
	wr.next = NULL;

	ret = ib_post_recv(ib_conn->qp, &wr, NULL);
	if (unlikely(ret))
		iser_err("ib_post_recv login failed ret=%d\n", ret);

	return ret;
}

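/**
 * iser_post_recvm - post a receive buffer for a task response
 * @iser_conn: iSER connection context
 * @rx_desc: RX descriptor to (re)post
 *
 * Return: 0 on success, or errno code on posting failure
 */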
int iser_post_recvm(struct iser_conn *iser_conn, struct iser_rx_desc *rx_desc)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_recv_wr wr;
	int ret;

	rx_desc->cqe.done = iser_task_rsp;
	wr.wr_cqe = &rx_desc->cqe;
	wr.sg_list = &rx_desc->rx_sg;
	wr.num_sge = 1;
	wr.next = NULL;

	ret = ib_post_recv(ib_conn->qp, &wr, NULL);
	if (unlikely(ret))
		iser_err("ib_post_recv failed ret=%d\n", ret);

	return ret;
}

/**
 * iser_post_send - Initiate a Send DTO operation
 * @ib_conn: connection RDMA resources
 * @tx_desc: iSER TX descriptor
 *
 * Return: 0 on success, or errno code on posting failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_send_wr *wr = &tx_desc->send_wr;
	struct ib_send_wr *first_wr;
	int ret;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);

	wr->next = NULL;
	wr->wr_cqe = &tx_desc->cqe;
	wr->sg_list = tx_desc->tx_sg;
	wr->num_sge = tx_desc->num_sge;
	wr->opcode = IB_WR_SEND;
	wr->send_flags = IB_SEND_SIGNALED;

	/*
	 * Chain any pending local-invalidate and fast-reg work requests
	 * ahead of the send, so the whole sequence is posted in one call.
	 */
	if (tx_desc->inv_wr.next)
		first_wr = &tx_desc->inv_wr;
	else if (tx_desc->reg_wr.wr.next)
		first_wr = &tx_desc->reg_wr.wr;
	else
		first_wr = wr;

	ret = ib_post_send(ib_conn->qp, first_wr, NULL);
	if (unlikely(ret))
		iser_err("ib_post_send failed, ret:%d opcode:%d\n",
			 ret, wr->opcode);

	return ret;
}

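/**
 * iser_check_task_pi_status - check the T10-PI (signature) status of a task
 * @iser_task: iSER task that carried the protected transfer
 * @cmd_dir: data direction whose registration is checked
 * @sector: output, first bad sector in case of a signature error
 *
 * Return: 0 if no protection error was detected, otherwise a nonzero code
 * identifying the DIF error (0x1 bad guard, 0x2 bad app tag, 0x3 bad ref
 * tag), matching the switch below.
 */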
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	struct iser_fr_desc *desc = reg->desc;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->sig_protected) {
		desc->sig_protected = false;
		ret = ib_check_mr_status(desc->rsc.sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			iser_err("ib_check_mr_status failed, ret %d\n", ret);
			/* Not a lot we can do, return ambiguous guard error */
			*sector = 0;
			return 0x1;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;

			/* each protected sector carries an 8-byte DIF tuple */
			sector_div(sector_off, sector_size + 8);
			*sector = scsi_get_sector(iser_task->sc) + sector_off;

			iser_err("PI error found type %d at sector %llx "
				 "expected %x vs actual %x\n",
				 mr_status.sig_err.err_type,
				 (unsigned long long)*sector,
				 mr_status.sig_err.expected,
				 mr_status.sig_err.actual);

			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;
}

void iser_err_comp(struct ib_wc *wc, const char *type)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR) {
		struct iser_conn *iser_conn = to_iser_conn(wc->qp->qp_context);

		iser_err("%s failure: %s (%d) vend_err %#x\n", type,
			 ib_wc_status_msg(wc->status), wc->status,
			 wc->vendor_err);

		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
	} else {
		iser_dbg("%s failure: %s (%d)\n", type,
			 ib_wc_status_msg(wc->status), wc->status);
	}
}