/*
 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>
#include <rdma/iw_portmap.h>
#include <rdma/rdma_netlink.h>

#include "iwcm.h"
MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const iwcm_rej_reason_strs[] = {
	[ECONNRESET]			= "reset by remote host",
	[ECONNREFUSED]			= "refused by remote application",
	[ETIMEDOUT]			= "setup timeout",
};

const char *__attribute_const__ iwcm_reject_msg(int reason)
{
	size_t index;

	/* iWARP uses negative errnos */
	index = -reason;

	if (index < ARRAY_SIZE(iwcm_rej_reason_strs) &&
	    iwcm_rej_reason_strs[index])
		return iwcm_rej_reason_strs[index];
	else
		return "unrecognized reason";
}
EXPORT_SYMBOL(iwcm_reject_msg);
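
/*
 * Example (illustrative sketch, not code from this file): a consumer
 * handling a failed connection attempt might turn the negative errno
 * carried in the event into text, where "event" is the hypothetical
 * iw_cm_event passed to its handler:
 *
 *	pr_debug("connect failed: %s\n", iwcm_reject_msg(event->status));
 */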

static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
	[RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb},
	[RDMA_NL_IWPM_HELLO] = {.dump = iwpm_hello_cb}
};

static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
	struct work_struct work;
	struct iwcm_id_private *cm_id;
	struct list_head list;
	struct iw_cm_event event;
	struct list_head free_list;
};

static unsigned int default_backlog = 256;

static struct ctl_table_header *iwcm_ctl_table_hdr;
static struct ctl_table iwcm_ctl_table[] = {
	{
		.procname	= "default_backlog",
		.data		= &default_backlog,
		.maxlen		= sizeof(default_backlog),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements. The design pre-allocates them based on the cm_id type:
 *	LISTENING IDS:	Get enough elements preallocated to handle the
 *			listen backlog.
 *	ACTIVE IDS:	4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *	PASSIVE IDS:	3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed. cm_event_handler() returns -ENOMEM in this case. It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method. This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */

static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
	struct iwcm_work *work;

	if (list_empty(&cm_id_priv->work_free_list))
		return NULL;
	work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
			  free_list);
	list_del_init(&work->free_list);
	return work;
}

static void put_work(struct iwcm_work *work)
{
	list_add(&work->free_list, &work->cm_id->work_free_list);
}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
	struct list_head *e, *tmp;

	list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
		list_del(e);
		kfree(list_entry(e, struct iwcm_work, free_list));
	}
}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
	struct iwcm_work *work;

	BUG_ON(!list_empty(&cm_id_priv->work_free_list));
	while (count--) {
		work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
		if (!work) {
			dealloc_work_entries(cm_id_priv);
			return -ENOMEM;
		}
		work->cm_id = cm_id_priv;
		INIT_LIST_HEAD(&work->list);
		put_work(work);
	}
	return 0;
}

/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low-level driver doesn't have to. Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
	void *p;

	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	event->private_data = p;
	return 0;
}

static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
	dealloc_work_entries(cm_id_priv);
	kfree(cm_id_priv);
}

/*
 * Release a reference on cm_id. If the last reference is being
 * released, free the cm_id and return 1.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
	if (refcount_dec_and_test(&cm_id_priv->refcount)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		free_cm_id(cm_id_priv);
		return 1;
	}

	return 0;
}

static void add_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	refcount_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	(void)iwcm_deref_id(cm_id_priv);
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
				 iw_cm_handler cm_handler,
				 void *context)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->state = IW_CM_STATE_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.event_handler = cm_event_handler;
	cm_id_priv->id.add_ref = add_ref;
	cm_id_priv->id.rem_ref = rem_ref;
	spin_lock_init(&cm_id_priv->lock);
	refcount_set(&cm_id_priv->refcount, 1);
	init_waitqueue_head(&cm_id_priv->connect_wait);
	init_completion(&cm_id_priv->destroy_comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->work_free_list);

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);
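
/*
 * Example (illustrative sketch): a kernel consumer might create an id
 * and later destroy it as follows, where my_handler and my_ctx are
 * hypothetical caller-supplied names:
 *
 *	struct iw_cm_id *id;
 *
 *	id = iw_create_cm_id(device, my_handler, my_ctx);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	...
 *	iw_destroy_cm_id(id);
 */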

static int iwcm_modify_qp_err(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	if (!qp)
		return -EINVAL;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	BUG_ON(qp == NULL);
	qp_attr.qp_state = IB_QPS_SQD;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/* Wait if we're currently in a connect or accept downcall */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_CLOSING;

		/* QP could be NULL for a user-mode client */
		if (cm_id_priv->qp)
			qp = cm_id_priv->qp;
		else
			ret = -EINVAL;
		break;
	case IW_CM_STATE_LISTEN:
		ret = -EINVAL;
		break;
	case IW_CM_STATE_CLOSING:
		/* remote peer closed first */
	case IW_CM_STATE_IDLE:
		/* accept or connect returned !0 */
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called disconnect before/without calling accept after
		 * connect_request event delivered.
		 */
		break;
	case IW_CM_STATE_CONN_SENT:
		/* Can only get here if wait above fails */
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp) {
		if (abrupt)
			ret = iwcm_modify_qp_err(qp);
		else
			ret = iwcm_modify_qp_sqd(qp);

		/*
		 * If both sides are disconnecting the QP could
		 * already be in ERR or SQD states
		 */
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);
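
/*
 * Illustrative sketch: for an established connection, a consumer picks
 * between an orderly close (QP moved to SQD) and an abrupt close (QP
 * moved to ERR) via the abrupt flag, where "id" is a hypothetical
 * established cm_id:
 *
 *	iw_cm_disconnect(id, 0);	(orderly close)
 *	iw_cm_disconnect(id, 1);	(abrupt close)
 */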

/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/*
	 * Wait if we're currently in a connect or accept downcall. A
	 * listening endpoint should never block here.
	 */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	/*
	 * Since we're deleting the cm_id, drop any events that
	 * might arrive before the last dereference.
	 */
	set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	qp = cm_id_priv->qp;
	cm_id_priv->qp = NULL;

	switch (cm_id_priv->state) {
	case IW_CM_STATE_LISTEN:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* destroy the listening endpoint */
		cm_id->device->ops.iw_destroy_listen(cm_id);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* Abrupt close of the connection */
		(void)iwcm_modify_qp_err(qp);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called destroy before/without calling accept after
		 * receiving connection request event notification or
		 * returned nonzero from the event callback function.
		 * In either case, must tell the provider to reject.
		 */
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_id->device->ops.iw_reject(cm_id, NULL, 0);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_DESTROYING:
	default:
		BUG();
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	if (qp)
		cm_id_priv->id.device->ops.iw_rem_ref(qp);

	if (cm_id->mapped) {
		iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
		iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
	}

	(void)iwcm_deref_id(cm_id_priv);
}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
	destroy_cm_id(cm_id);
}
EXPORT_SYMBOL(iw_destroy_cm_id);

/**
 * iw_cm_check_wildcard - If the mapped IP address is the wildcard, use the original
 * @pm_addr: sockaddr containing the IP address to check for wildcard
 * @cm_addr: sockaddr containing the actual IP address
 * @cm_outaddr: sockaddr in which to set the IP address, leaving the port as is
 *
 * Checks the pm_addr for wildcard and then sets cm_outaddr's
 * IP to the actual (cm_addr).
 */
static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
				 struct sockaddr_storage *cm_addr,
				 struct sockaddr_storage *cm_outaddr)
{
	if (pm_addr->ss_family == AF_INET) {
		struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;

		if (pm4_addr->sin_addr.s_addr == htonl(INADDR_ANY)) {
			struct sockaddr_in *cm4_addr =
				(struct sockaddr_in *)cm_addr;
			struct sockaddr_in *cm4_outaddr =
				(struct sockaddr_in *)cm_outaddr;

			cm4_outaddr->sin_addr = cm4_addr->sin_addr;
		}
	} else {
		struct sockaddr_in6 *pm6_addr = (struct sockaddr_in6 *)pm_addr;

		if (ipv6_addr_type(&pm6_addr->sin6_addr) == IPV6_ADDR_ANY) {
			struct sockaddr_in6 *cm6_addr =
				(struct sockaddr_in6 *)cm_addr;
			struct sockaddr_in6 *cm6_outaddr =
				(struct sockaddr_in6 *)cm_outaddr;

			cm6_outaddr->sin6_addr = cm6_addr->sin6_addr;
		}
	}
}

/**
 * iw_cm_map - Use portmapper to map the ports
 * @cm_id: connection manager pointer
 * @active: Indicates the active side when true
 * returns nonzero for error only if iwpm_create_mapinfo() fails
 *
 * Tries to add a mapping for a port using the Portmapper. If
 * successful in mapping the IP/Port it will check the remote
 * mapped IP address for a wildcard IP address and replace the
 * zero IP address with the remote_addr.
 */
static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
{
	const char *devname = dev_name(&cm_id->device->dev);
	const char *ifname = cm_id->device->iw_ifname;
	struct iwpm_dev_data pm_reg_msg = {};
	struct iwpm_sa_data pm_msg;
	int status;

	if (strlen(devname) >= sizeof(pm_reg_msg.dev_name) ||
	    strlen(ifname) >= sizeof(pm_reg_msg.if_name))
		return -EINVAL;

	cm_id->m_local_addr = cm_id->local_addr;
	cm_id->m_remote_addr = cm_id->remote_addr;

	strcpy(pm_reg_msg.dev_name, devname);
	strcpy(pm_reg_msg.if_name, ifname);

	if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
	    !iwpm_valid_pid())
		return 0;

	cm_id->mapped = true;
	pm_msg.loc_addr = cm_id->local_addr;
	pm_msg.rem_addr = cm_id->remote_addr;
	pm_msg.flags = (cm_id->device->iw_driver_flags & IW_F_NO_PORT_MAP) ?
		       IWPM_FLAGS_NO_PORT_MAP : 0;
	if (active)
		status = iwpm_add_and_query_mapping(&pm_msg,
						    RDMA_NL_IWCM);
	else
		status = iwpm_add_mapping(&pm_msg, RDMA_NL_IWCM);

	if (!status) {
		cm_id->m_local_addr = pm_msg.mapped_loc_addr;
		if (active) {
			cm_id->m_remote_addr = pm_msg.mapped_rem_addr;
			iw_cm_check_wildcard(&pm_msg.mapped_rem_addr,
					     &cm_id->remote_addr,
					     &cm_id->m_remote_addr);
		}
	}

	return iwpm_create_mapinfo(&cm_id->local_addr,
				   &cm_id->m_local_addr,
				   RDMA_NL_IWCM, pm_msg.flags);
}

/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	if (!backlog)
		backlog = default_backlog;

	ret = alloc_work_entries(cm_id_priv, backlog);
	if (ret)
		return ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
		cm_id_priv->state = IW_CM_STATE_LISTEN;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = iw_cm_map(cm_id, false);
		if (!ret)
			ret = cm_id->device->ops.iw_create_listen(cm_id,
								  backlog);
		if (ret)
			cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
EXPORT_SYMBOL(iw_cm_listen);
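
/*
 * Illustrative sketch of passive-side setup, assuming a hypothetical
 * handler my_conn_req_handler that accepts or rejects each
 * IW_CM_EVENT_CONNECT_REQUEST it receives, and a caller-provided bound
 * address my_bound_sockaddr:
 *
 *	id = iw_create_cm_id(device, my_conn_req_handler, NULL);
 *	id->local_addr = my_bound_sockaddr;
 *	ret = iw_cm_listen(id, 0);	(0 selects default_backlog)
 */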

/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
		 const void *private_data,
		 u8 private_data_len)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->ops.iw_reject(cm_id, private_data,
					   private_data_len);

	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
EXPORT_SYMBOL(iw_cm_reject);

/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
		 struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	/* Get the ib_qp given the QPN */
	qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id->device->ops.iw_add_ref(qp);
	cm_id_priv->qp = qp;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->ops.iw_accept(cm_id, iw_param);
	if (ret) {
		/* An error on accept precludes provider events */
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		qp = cm_id_priv->qp;
		cm_id_priv->qp = NULL;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		if (qp)
			cm_id->device->ops.iw_rem_ref(qp);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_accept);
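
/*
 * Illustrative sketch: inside a hypothetical connect-request handler,
 * the consumer associates its own QP (my_qp, created on the same
 * device, with hypothetical my_ord/my_ird limits) with the new id and
 * accepts:
 *
 *	struct iw_cm_conn_param param = {};
 *
 *	param.qpn = my_qp->qp_num;
 *	param.ord = my_ord;
 *	param.ird = my_ird;
 *	ret = iw_cm_accept(cm_id, &param);
 *
 * A nonzero return means the provider accept failed and no events will
 * be delivered for this id.
 */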

/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_destroy_cm_id will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;
	unsigned long flags;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, 4);
	if (ret)
		return ret;

	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
		ret = -EINVAL;
		goto err;
	}

	/* Get the ib_qp given the QPN */
	qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		ret = -EINVAL;
		goto err;
	}
	cm_id->device->ops.iw_add_ref(qp);
	cm_id_priv->qp = qp;
	cm_id_priv->state = IW_CM_STATE_CONN_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = iw_cm_map(cm_id, true);
	if (!ret)
		ret = cm_id->device->ops.iw_connect(cm_id, iw_param);
	if (!ret)
		return 0;	/* success */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	qp = cm_id_priv->qp;
	cm_id_priv->qp = NULL;
	cm_id_priv->state = IW_CM_STATE_IDLE;
err:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	if (qp)
		cm_id->device->ops.iw_rem_ref(qp);
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);
	return ret;
}
EXPORT_SYMBOL(iw_cm_connect);
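
/*
 * Illustrative sketch of the active side, where my_qp, my_handler,
 * my_src_sockaddr and my_dst_sockaddr are hypothetical caller-owned
 * names:
 *
 *	struct iw_cm_conn_param param = {};
 *
 *	id = iw_create_cm_id(device, my_handler, NULL);
 *	id->local_addr = my_src_sockaddr;
 *	id->remote_addr = my_dst_sockaddr;
 *	param.qpn = my_qp->qp_num;
 *	ret = iw_cm_connect(id, &param);
 *
 * Success only means the request was issued; the outcome arrives later
 * as an IW_CM_EVENT_CONNECT_REPLY event.
 */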

/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the cm_id is cloned.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
				struct iw_cm_event *iw_event)
{
	unsigned long flags;
	struct iw_cm_id *cm_id;
	struct iwcm_id_private *cm_id_priv;
	int ret;

	/*
	 * The provider should never generate a connection request
	 * event with a bad status.
	 */
	BUG_ON(iw_event->status);

	cm_id = iw_create_cm_id(listen_id_priv->id.device,
				listen_id_priv->id.cm_handler,
				listen_id_priv->id.context);
	/* If the cm_id could not be created, ignore the request */
	if (IS_ERR(cm_id))
		goto out;

	cm_id->provider_data = iw_event->provider_data;
	cm_id->m_local_addr = iw_event->local_addr;
	cm_id->m_remote_addr = iw_event->remote_addr;
	cm_id->local_addr = listen_id_priv->id.local_addr;

	ret = iwpm_get_remote_info(&listen_id_priv->id.m_local_addr,
				   &iw_event->remote_addr,
				   &cm_id->remote_addr,
				   RDMA_NL_IWCM);
	if (ret) {
		cm_id->remote_addr = iw_event->remote_addr;
	} else {
		iw_cm_check_wildcard(&listen_id_priv->id.m_local_addr,
				     &iw_event->local_addr,
				     &cm_id->local_addr);
		iw_event->local_addr = cm_id->local_addr;
		iw_event->remote_addr = cm_id->remote_addr;
	}

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	cm_id_priv->state = IW_CM_STATE_CONN_RECV;

	/*
	 * We could be destroying the listening id. If so, ignore this
	 * upcall.
	 */
	spin_lock_irqsave(&listen_id_priv->lock, flags);
	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}
	spin_unlock_irqrestore(&listen_id_priv->lock, flags);

	ret = alloc_work_entries(cm_id_priv, 3);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}

	/* Call the client CM handler */
	ret = cm_id->cm_handler(cm_id, iw_event);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
	}

out:
	if (iw_event->private_data_len)
		kfree(iw_event->private_data);
}

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	/*
	 * We clear the CONNECT_WAIT bit here to allow the callback
	 * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
	 * from a callback handler is not allowed.
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_cm_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	struct ib_qp *qp = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/*
	 * Clear the connect wait bit so a callback function calling
	 * iw_cm_disconnect will not wait and deadlock this thread
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
	if (iw_event->status == 0) {
		cm_id_priv->id.m_local_addr = iw_event->local_addr;
		cm_id_priv->id.m_remote_addr = iw_event->remote_addr;
		iw_event->local_addr = cm_id_priv->id.local_addr;
		iw_event->remote_addr = cm_id_priv->id.remote_addr;
		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	} else {
		/* REJECTED or RESET */
		qp = cm_id_priv->qp;
		cm_id_priv->qp = NULL;
		cm_id_priv->state = IW_CM_STATE_IDLE;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	if (qp)
		cm_id_priv->id.device->ops.iw_rem_ref(qp);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

	if (iw_event->private_data_len)
		kfree(iw_event->private_data);

	/* Wake up waiters on connect complete */
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
		cm_id_priv->state = IW_CM_STATE_CLOSING;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
			    struct iw_cm_event *iw_event)
{
	struct ib_qp *qp;
	unsigned long flags;
	int ret = 0, notify_event = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	qp = cm_id_priv->qp;
	cm_id_priv->qp = NULL;

	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_IDLE;
		notify_event = 1;
		break;
	case IW_CM_STATE_DESTROYING:
		break;
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp)
		cm_id_priv->id.device->ops.iw_rem_ref(qp);
	if (notify_event)
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	return ret;
}

static int process_event(struct iwcm_id_private *cm_id_priv,
			 struct iw_cm_event *iw_event)
{
	int ret = 0;

	switch (iw_event->event) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		cm_conn_req_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		ret = cm_conn_rep_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		ret = cm_conn_est_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_DISCONNECT:
		cm_disconnect_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CLOSE:
		ret = cm_close_handler(cm_id_priv, iw_event);
		break;
	default:
		BUG();
	}

	return ret;
}

/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
	struct iw_cm_event levent;
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	unsigned long flags;
	int empty;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	empty = list_empty(&cm_id_priv->work_list);
	while (!empty) {
		work = list_entry(cm_id_priv->work_list.next,
				  struct iwcm_work, list);
		list_del_init(&work->list);
		empty = list_empty(&cm_id_priv->work_list);
		levent = work->event;
		put_work(work);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
			ret = process_event(cm_id_priv, &levent);
			if (ret)
				destroy_cm_id(&cm_id_priv->id);
		} else
			pr_debug("dropping event %d\n", levent.event);
		if (iwcm_deref_id(cm_id_priv))
			return;
		if (empty)
			return;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * This function is called in interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block. Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *	      0	- the event was handled.
 *	-ENOMEM	- the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
			    struct iw_cm_event *iw_event)
{
	struct iwcm_work *work;
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	work = get_work(cm_id_priv);
	if (!work) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&work->work, cm_work_handler);
	work->cm_id = cm_id_priv;
	work->event = *iw_event;

	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
	    work->event.private_data_len) {
		ret = copy_private_data(&work->event);
		if (ret) {
			put_work(work);
			goto out;
		}
	}

	refcount_inc(&cm_id_priv->refcount);
	if (list_empty(&cm_id_priv->work_list)) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		queue_work(iwcm_wq, &work->work);
	} else
		list_add_tail(&work->list, &cm_id_priv->work_list);
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
				  struct ib_qp_attr *qp_attr,
				  int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE |
					   IB_ACCESS_REMOTE_READ;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
				 struct ib_qp_attr *qp_attr,
				 int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = 0;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
	case IB_QPS_RTR:
		ret = iwcm_init_qp_init_attr(cm_id_priv,
					     qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = iwcm_init_qp_rts_attr(cm_id_priv,
					    qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);
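
/*
 * Illustrative sketch: a consumer transitioning its QP typically lets
 * the CM fill in the CM-mandated attributes and then applies them,
 * where "my_qp" is a hypothetical consumer-owned QP:
 *
 *	struct ib_qp_attr attr;
 *	int mask;
 *
 *	attr.qp_state = IB_QPS_RTS;
 *	ret = iw_cm_init_qp_attr(id, &attr, &mask);
 *	if (!ret)
 *		ret = ib_modify_qp(my_qp, &attr, mask);
 */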

static int __init iw_cm_init(void)
{
	int ret;

	ret = iwpm_init(RDMA_NL_IWCM);
	if (ret)
		return ret;

	iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
	if (!iwcm_wq)
		goto err_alloc;

	iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
						 iwcm_ctl_table);
	if (!iwcm_ctl_table_hdr) {
		pr_err("iw_cm: couldn't register sysctl paths\n");
		goto err_sysctl;
	}

	rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
	return 0;

err_sysctl:
	destroy_workqueue(iwcm_wq);
err_alloc:
	iwpm_exit(RDMA_NL_IWCM);
	return -ENOMEM;
}

static void __exit iw_cm_cleanup(void)
{
	rdma_nl_unregister(RDMA_NL_IWCM);
	unregister_net_sysctl_table(iwcm_ctl_table_hdr);
	destroy_workqueue(iwcm_wq);
	iwpm_exit(RDMA_NL_IWCM);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_IWCM, 2);

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);