// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/*          Fredy Neeser */
/*          Greg Joyce <greg@opengridcomputing.com> */
/* Copyright (c) 2008-2019, IBM Corporation */
/* Copyright (c) 2017, Open Grid Computing, Inc. */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/net.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/tcp.h>
#include <trace/events/sock.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include "siw.h"
#include "siw_cm.h"

/*
 * Set to any combination of
 * MPA_V2_RDMA_NO_RTR, MPA_V2_RDMA_READ_RTR, MPA_V2_RDMA_WRITE_RTR
 */
static __be16 rtr_type = MPA_V2_RDMA_READ_RTR | MPA_V2_RDMA_WRITE_RTR;
static const bool relaxed_ird_negotiation = true;
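/*
 * With 'relaxed_ird_negotiation' enabled (see above), IRD/ORD values
 * that do not match the peer's proposal are silently adapted instead
 * of failing connection setup (see siw_proc_mpareply() and
 * siw_accept()).
 */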

static void siw_cm_llp_state_change(struct sock *s);
static void siw_cm_llp_data_ready(struct sock *s);
static void siw_cm_llp_write_space(struct sock *s);
static void siw_cm_llp_error_report(struct sock *s);
static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
			 int status);

static void siw_sk_assign_cm_upcalls(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_state_change = siw_cm_llp_state_change;
	sk->sk_data_ready = siw_cm_llp_data_ready;
	sk->sk_write_space = siw_cm_llp_write_space;
	sk->sk_error_report = siw_cm_llp_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

static void siw_sk_save_upcalls(struct sock *sk)
{
	struct siw_cep *cep = sk_to_cep(sk);

	write_lock_bh(&sk->sk_callback_lock);
	cep->sk_state_change = sk->sk_state_change;
	cep->sk_data_ready = sk->sk_data_ready;
	cep->sk_write_space = sk->sk_write_space;
	cep->sk_error_report = sk->sk_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

static void siw_sk_restore_upcalls(struct sock *sk, struct siw_cep *cep)
{
	sk->sk_state_change = cep->sk_state_change;
	sk->sk_data_ready = cep->sk_data_ready;
	sk->sk_write_space = cep->sk_write_space;
	sk->sk_error_report = cep->sk_error_report;
	sk->sk_user_data = NULL;
}

static void siw_qp_socket_assoc(struct siw_cep *cep, struct siw_qp *qp)
{
	struct socket *s = cep->sock;
	struct sock *sk = s->sk;

	write_lock_bh(&sk->sk_callback_lock);

	qp->attrs.sk = s;
	sk->sk_data_ready = siw_qp_llp_data_ready;
	sk->sk_write_space = siw_qp_llp_write_space;

	write_unlock_bh(&sk->sk_callback_lock);
}

static void siw_socket_disassoc(struct socket *s)
{
	struct sock *sk = s->sk;
	struct siw_cep *cep;

	if (sk) {
		write_lock_bh(&sk->sk_callback_lock);
		cep = sk_to_cep(sk);
		if (cep) {
			siw_sk_restore_upcalls(sk, cep);
			siw_cep_put(cep);
		} else {
			pr_warn("siw: cannot restore sk callbacks: no ep\n");
		}
		write_unlock_bh(&sk->sk_callback_lock);
	} else {
		pr_warn("siw: cannot restore sk callbacks: no sk\n");
	}
}

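/*
 * siw_rtr_data_ready()
 *
 * Temporary socket callback installed while waiting for the peer's
 * first RDMA frame (zero length Read or Write) in MPA v2 peer-to-peer
 * mode. Feeds the frame into siw_tcp_rx_data() and, if processing
 * succeeded, signals IW_CM_EVENT_ESTABLISHED and hands the socket
 * over to QP control.
 */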
static void siw_rtr_data_ready(struct sock *sk)
{
	struct siw_cep *cep;
	struct siw_qp *qp = NULL;
	read_descriptor_t rd_desc;

	trace_sk_data_ready(sk);

	read_lock(&sk->sk_callback_lock);

	cep = sk_to_cep(sk);
	if (!cep) {
		WARN(1, "No connection endpoint\n");
		goto out;
	}
	qp = sk_to_qp(sk);

	memset(&rd_desc, 0, sizeof(rd_desc));
	rd_desc.arg.data = qp;
	rd_desc.count = 1;

	tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data);
	/*
	 * Check if first frame was successfully processed.
	 * Signal connection full establishment if yes.
	 * Failed data processing would have already scheduled
	 * connection drop.
	 */
	if (!qp->rx_stream.rx_suspend)
		siw_cm_upcall(cep, IW_CM_EVENT_ESTABLISHED, 0);
out:
	read_unlock(&sk->sk_callback_lock);
	if (qp)
		siw_qp_socket_assoc(cep, qp);
}

static void siw_sk_assign_rtr_upcalls(struct siw_cep *cep)
{
	struct sock *sk = cep->sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_data_ready = siw_rtr_data_ready;
	sk->sk_write_space = siw_qp_llp_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
}

static void siw_cep_socket_assoc(struct siw_cep *cep, struct socket *s)
{
	cep->sock = s;
	siw_cep_get(cep);
	s->sk->sk_user_data = cep;

	siw_sk_save_upcalls(s->sk);
	siw_sk_assign_cm_upcalls(s->sk);
}

static struct siw_cep *siw_cep_alloc(struct siw_device *sdev)
{
	struct siw_cep *cep = kzalloc(sizeof(*cep), GFP_KERNEL);
	unsigned long flags;

	if (!cep)
		return NULL;

	INIT_LIST_HEAD(&cep->listenq);
	INIT_LIST_HEAD(&cep->devq);
	INIT_LIST_HEAD(&cep->work_freelist);

	kref_init(&cep->ref);
	cep->state = SIW_EPSTATE_IDLE;
	init_waitqueue_head(&cep->waitq);
	spin_lock_init(&cep->lock);
	cep->sdev = sdev;
	cep->enhanced_rdma_conn_est = false;

	spin_lock_irqsave(&sdev->lock, flags);
	list_add_tail(&cep->devq, &sdev->cep_list);
	spin_unlock_irqrestore(&sdev->lock, flags);

	siw_dbg_cep(cep, "new endpoint\n");
	return cep;
}

static void siw_cm_free_work(struct siw_cep *cep)
{
	struct list_head *w, *tmp;
	struct siw_cm_work *work;

	list_for_each_safe(w, tmp, &cep->work_freelist) {
		work = list_entry(w, struct siw_cm_work, list);
		list_del(&work->list);
		kfree(work);
	}
}

static void siw_cancel_mpatimer(struct siw_cep *cep)
{
	spin_lock_bh(&cep->lock);
	if (cep->mpa_timer) {
		if (cancel_delayed_work(&cep->mpa_timer->work)) {
			siw_cep_put(cep);
			kfree(cep->mpa_timer); /* not needed again */
		}
		cep->mpa_timer = NULL;
	}
	spin_unlock_bh(&cep->lock);
}

static void siw_put_work(struct siw_cm_work *work)
{
	INIT_LIST_HEAD(&work->list);
	spin_lock_bh(&work->cep->lock);
	list_add(&work->list, &work->cep->work_freelist);
	spin_unlock_bh(&work->cep->lock);
}

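/*
 * siw_cep_set_inuse()/siw_cep_set_free()
 *
 * Serialize CEP state manipulation: only one context may operate on
 * a CEP at a time. set_inuse() sleeps until the endpoint becomes
 * available, so it must not be called from atomic context.
 */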
static void siw_cep_set_inuse(struct siw_cep *cep)
{
	unsigned long flags;
retry:
	spin_lock_irqsave(&cep->lock, flags);

	if (cep->in_use) {
		spin_unlock_irqrestore(&cep->lock, flags);
		wait_event_interruptible(cep->waitq, !cep->in_use);
		if (signal_pending(current))
			flush_signals(current);
		goto retry;
	} else {
		cep->in_use = 1;
		spin_unlock_irqrestore(&cep->lock, flags);
	}
}

static void siw_cep_set_free(struct siw_cep *cep)
{
	unsigned long flags;

	spin_lock_irqsave(&cep->lock, flags);
	cep->in_use = 0;
	spin_unlock_irqrestore(&cep->lock, flags);

	wake_up(&cep->waitq);
}

static void __siw_cep_dealloc(struct kref *ref)
{
	struct siw_cep *cep = container_of(ref, struct siw_cep, ref);
	struct siw_device *sdev = cep->sdev;
	unsigned long flags;

	WARN_ON(cep->listen_cep);

	/* kfree(NULL) is safe */
	kfree(cep->mpa.pdata);
	spin_lock_bh(&cep->lock);
	if (!list_empty(&cep->work_freelist))
		siw_cm_free_work(cep);
	spin_unlock_bh(&cep->lock);

	spin_lock_irqsave(&sdev->lock, flags);
	list_del(&cep->devq);
	spin_unlock_irqrestore(&sdev->lock, flags);

	siw_dbg_cep(cep, "free endpoint\n");
	kfree(cep);
}

static struct siw_cm_work *siw_get_work(struct siw_cep *cep)
{
	struct siw_cm_work *work = NULL;

	spin_lock_bh(&cep->lock);
	if (!list_empty(&cep->work_freelist)) {
		work = list_entry(cep->work_freelist.next, struct siw_cm_work,
				  list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&cep->lock);
	return work;
}

static int siw_cm_alloc_work(struct siw_cep *cep, int num)
{
	struct siw_cm_work *work;

	while (num--) {
		work = kmalloc(sizeof(*work), GFP_KERNEL);
		if (!work) {
			if (!list_empty(&cep->work_freelist))
				siw_cm_free_work(cep);
			return -ENOMEM;
		}
		work->cep = cep;
		INIT_LIST_HEAD(&work->list);
		list_add(&work->list, &cep->work_freelist);
	}
	return 0;
}

/*
 * siw_cm_upcall()
 *
 * Upcall to IWCM to inform about async connection events
 */
static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
			 int status)
{
	struct iw_cm_event event;
	struct iw_cm_id *id;

	memset(&event, 0, sizeof(event));
	event.status = status;
	event.event = reason;

	if (reason == IW_CM_EVENT_CONNECT_REQUEST) {
		event.provider_data = cep;
		id = cep->listen_cep->cm_id;
	} else {
		id = cep->cm_id;
	}
	/* Signal IRD and ORD */
	if (reason == IW_CM_EVENT_ESTABLISHED ||
	    reason == IW_CM_EVENT_CONNECT_REPLY) {
		/* Signal negotiated IRD/ORD values we will use */
		event.ird = cep->ird;
		event.ord = cep->ord;
	} else if (reason == IW_CM_EVENT_CONNECT_REQUEST) {
		event.ird = cep->ord;
		event.ord = cep->ird;
	}
	/* Signal private data and address information */
	if (reason == IW_CM_EVENT_CONNECT_REQUEST ||
	    reason == IW_CM_EVENT_CONNECT_REPLY) {
		u16 pd_len = be16_to_cpu(cep->mpa.hdr.params.pd_len);

		if (pd_len) {
			/*
			 * hand over MPA private data
			 */
			event.private_data_len = pd_len;
			event.private_data = cep->mpa.pdata;

			/* Hide MPA V2 IRD/ORD control */
			if (cep->enhanced_rdma_conn_est) {
				event.private_data_len -=
					sizeof(struct mpa_v2_data);
				event.private_data +=
					sizeof(struct mpa_v2_data);
			}
		}
		getname_local(cep->sock, &event.local_addr);
		getname_peer(cep->sock, &event.remote_addr);
	}
	siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n",
		    cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status);

	return id->event_handler(id, &event);
}

/*
 * siw_qp_cm_drop()
 *
 * Drops established LLP connection if present and not already
 * scheduled for dropping. Called from user context, SQ workqueue
 * or receive IRQ. Caller signals if socket can be immediately
 * closed (basically, if not in IRQ).
 */
void siw_qp_cm_drop(struct siw_qp *qp, int schedule)
{
	struct siw_cep *cep = qp->cep;

	qp->rx_stream.rx_suspend = 1;
	qp->tx_ctx.tx_suspend = 1;

	if (!qp->cep)
		return;

	if (schedule) {
		siw_cm_queue_work(cep, SIW_CM_WORK_CLOSE_LLP);
	} else {
		siw_cep_set_inuse(cep);

		if (cep->state == SIW_EPSTATE_CLOSED) {
			siw_dbg_cep(cep, "already closed\n");
			goto out;
		}
		siw_dbg_cep(cep, "immediate close, state %d\n", cep->state);

		if (qp->term_info.valid)
			siw_send_terminate(qp);

		if (cep->cm_id) {
			switch (cep->state) {
			case SIW_EPSTATE_AWAIT_MPAREP:
				siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
					      -EINVAL);
				break;

			case SIW_EPSTATE_RDMA_MODE:
				siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
				break;

			case SIW_EPSTATE_IDLE:
			case SIW_EPSTATE_LISTENING:
			case SIW_EPSTATE_CONNECTING:
			case SIW_EPSTATE_AWAIT_MPAREQ:
			case SIW_EPSTATE_RECVD_MPAREQ:
			case SIW_EPSTATE_CLOSED:
			default:
				break;
			}
			cep->cm_id->rem_ref(cep->cm_id);
			cep->cm_id = NULL;
			siw_cep_put(cep);
		}
		cep->state = SIW_EPSTATE_CLOSED;

		if (cep->sock) {
			siw_socket_disassoc(cep->sock);
			/*
			 * Immediately close socket
			 */
			sock_release(cep->sock);
			cep->sock = NULL;
		}
		if (cep->qp) {
			cep->qp = NULL;
			siw_qp_put(qp);
		}
out:
		siw_cep_set_free(cep);
	}
}

void siw_cep_put(struct siw_cep *cep)
{
	WARN_ON(kref_read(&cep->ref) < 1);
	kref_put(&cep->ref, __siw_cep_dealloc);
}

void siw_cep_get(struct siw_cep *cep)
{
	kref_get(&cep->ref);
}

/*
 * Expects params->pd_len in host byte order
 */
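/*
 * Wire layout of the transmitted frame (sketch):
 *
 *   [ struct mpa_rr | struct mpa_v2_data (enhanced mode only) | pdata ]
 *
 * Note that the pd_len field sent on the wire covers the MPA v2
 * control word plus the private data, while the pd_len argument
 * counts private data only.
 */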
static int siw_send_mpareqrep(struct siw_cep *cep, const void *pdata, u8 pd_len)
{
	struct socket *s = cep->sock;
	struct mpa_rr *rr = &cep->mpa.hdr;
	struct kvec iov[3];
	struct msghdr msg;
	int rv;
	int iovec_num = 0;
	int mpa_len;

	memset(&msg, 0, sizeof(msg));

	iov[iovec_num].iov_base = rr;
	iov[iovec_num].iov_len = sizeof(*rr);
	mpa_len = sizeof(*rr);

	if (cep->enhanced_rdma_conn_est) {
		iovec_num++;
		iov[iovec_num].iov_base = &cep->mpa.v2_ctrl;
		iov[iovec_num].iov_len = sizeof(cep->mpa.v2_ctrl);
		mpa_len += sizeof(cep->mpa.v2_ctrl);
	}
	if (pd_len) {
		iovec_num++;
		iov[iovec_num].iov_base = (char *)pdata;
		iov[iovec_num].iov_len = pd_len;
		mpa_len += pd_len;
	}
	if (cep->enhanced_rdma_conn_est)
		pd_len += sizeof(cep->mpa.v2_ctrl);

	rr->params.pd_len = cpu_to_be16(pd_len);

	rv = kernel_sendmsg(s, &msg, iov, iovec_num + 1, mpa_len);

	return rv < 0 ? rv : 0;
}

/*
 * Receive MPA Request/Reply header.
 *
 * Returns 0 if complete MPA Request/Reply header including
 * eventual private data was received. Returns -EAGAIN if
 * header was partially received or negative error code otherwise.
 *
 * Context: May be called in process context only
 */
static int siw_recv_mpa_rr(struct siw_cep *cep)
{
	struct mpa_rr *hdr = &cep->mpa.hdr;
	struct socket *s = cep->sock;
	u16 pd_len;
	int rcvd, to_rcv;

	if (cep->mpa.bytes_rcvd < sizeof(struct mpa_rr)) {
		rcvd = ksock_recv(s, (char *)hdr + cep->mpa.bytes_rcvd,
				  sizeof(struct mpa_rr) - cep->mpa.bytes_rcvd,
				  0);
		if (rcvd <= 0)
			return -ECONNABORTED;

		cep->mpa.bytes_rcvd += rcvd;

		if (cep->mpa.bytes_rcvd < sizeof(struct mpa_rr))
			return -EAGAIN;

		if (be16_to_cpu(hdr->params.pd_len) > MPA_MAX_PRIVDATA)
			return -EPROTO;
	}
	pd_len = be16_to_cpu(hdr->params.pd_len);

	/*
	 * At least the MPA Request/Reply header (frame not including
	 * private data) has been received.
	 * Receive (or continue receiving) any private data.
	 */
	to_rcv = pd_len - (cep->mpa.bytes_rcvd - sizeof(struct mpa_rr));

	if (!to_rcv) {
		/*
		 * We must have hdr->params.pd_len == 0 and thus received a
		 * complete MPA Request/Reply frame.
		 * Check against peer protocol violation.
		 */
		u32 word;

		rcvd = ksock_recv(s, (char *)&word, sizeof(word), MSG_DONTWAIT);
		if (rcvd == -EAGAIN)
			return 0;

		if (rcvd == 0) {
			siw_dbg_cep(cep, "peer EOF\n");
			return -EPIPE;
		}
		if (rcvd < 0) {
			siw_dbg_cep(cep, "error: %d\n", rcvd);
			return rcvd;
		}
		siw_dbg_cep(cep, "peer sent extra data: %d\n", rcvd);

		return -EPROTO;
	}

	/*
	 * At this point, hdr->params.pd_len != 0: allocate the private
	 * data buffer if not yet done.
	 */
	if (!cep->mpa.pdata) {
		cep->mpa.pdata = kmalloc(pd_len + 4, GFP_KERNEL);
		if (!cep->mpa.pdata)
			return -ENOMEM;
	}
	rcvd = ksock_recv(
		s, cep->mpa.pdata + cep->mpa.bytes_rcvd - sizeof(struct mpa_rr),
		to_rcv + 4, MSG_DONTWAIT);

	if (rcvd < 0)
		return rcvd;

	if (rcvd > to_rcv)
		return -EPROTO;

	cep->mpa.bytes_rcvd += rcvd;

	if (to_rcv == rcvd) {
		siw_dbg_cep(cep, "%d bytes private data received\n", pd_len);
		return 0;
	}
	return -EAGAIN;
}

/*
 * siw_proc_mpareq()
 *
 * Read MPA Request from socket and signal new connection to IWCM
 * if success. Caller must hold lock on corresponding listening CEP.
 */
static int siw_proc_mpareq(struct siw_cep *cep)
{
	struct mpa_rr *req;
	int version, rv;
	u16 pd_len;

	rv = siw_recv_mpa_rr(cep);
	if (rv)
		return rv;

	req = &cep->mpa.hdr;

	version = __mpa_rr_revision(req->params.bits);
	pd_len = be16_to_cpu(req->params.pd_len);

	if (version > MPA_REVISION_2)
		/* allow for 0, 1, and 2 only */
		return -EPROTO;

	if (memcmp(req->key, MPA_KEY_REQ, 16))
		return -EPROTO;

	/* Prepare for sending MPA reply */
	memcpy(req->key, MPA_KEY_REP, 16);

	if (version == MPA_REVISION_2 &&
	    (req->params.bits & MPA_RR_FLAG_ENHANCED)) {
		/*
		 * MPA version 2 must signal IRD/ORD values and P2P mode
		 * in private data if header flag MPA_RR_FLAG_ENHANCED
		 * is set.
		 */
		if (pd_len < sizeof(struct mpa_v2_data))
			goto reject_conn;

		cep->enhanced_rdma_conn_est = true;
	}

	/* MPA Markers: currently not supported. Marker TX to be added. */
	if (req->params.bits & MPA_RR_FLAG_MARKERS)
		goto reject_conn;

	if (req->params.bits & MPA_RR_FLAG_CRC) {
		/*
		 * RFC 5044, page 27: CRC MUST be used if peer requests it.
		 * siw specific: reject the connection if the peer requests
		 * CRC while local CRC generation is off and the
		 * 'mpa_crc_strict' module parameter is set.
		 */
		if (!mpa_crc_required && mpa_crc_strict)
			goto reject_conn;

		/* Enable CRC if requested by module parameter */
		if (mpa_crc_required)
			req->params.bits |= MPA_RR_FLAG_CRC;
	}
	if (cep->enhanced_rdma_conn_est) {
		struct mpa_v2_data *v2 = (struct mpa_v2_data *)cep->mpa.pdata;

		/*
		 * Peer requested ORD becomes requested local IRD,
		 * peer requested IRD becomes requested local ORD.
		 * IRD and ORD get limited by global maximum values.
		 */
		cep->ord = ntohs(v2->ird) & MPA_IRD_ORD_MASK;
		cep->ord = min(cep->ord, SIW_MAX_ORD_QP);
		cep->ird = ntohs(v2->ord) & MPA_IRD_ORD_MASK;
		cep->ird = min(cep->ird, SIW_MAX_IRD_QP);

		/* May get overwritten by locally negotiated values */
		cep->mpa.v2_ctrl.ird = htons(cep->ird);
		cep->mpa.v2_ctrl.ord = htons(cep->ord);

		/*
		 * Support for peer sent zero length Write or Read to
		 * let local side enter RTS. Writes are preferred.
		 * Sends would require pre-posting a Receive and are
		 * not supported.
		 * Propose zero length Write if none of Read and Write
		 * is indicated.
		 */
		if (v2->ird & MPA_V2_PEER_TO_PEER) {
			cep->mpa.v2_ctrl.ird |= MPA_V2_PEER_TO_PEER;

			if (v2->ord & MPA_V2_RDMA_WRITE_RTR)
				cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_WRITE_RTR;
			else if (v2->ord & MPA_V2_RDMA_READ_RTR)
				cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_READ_RTR;
			else
				cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_WRITE_RTR;
		}
	}

	cep->state = SIW_EPSTATE_RECVD_MPAREQ;

	/* Keep reference until IWCM accepts/rejects */
	siw_cep_get(cep);
	rv = siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REQUEST, 0);
	if (rv)
		siw_cep_put(cep);

	return rv;

reject_conn:
	siw_dbg_cep(cep, "reject: crc %d:%d:%d, m %d:%d\n",
		    req->params.bits & MPA_RR_FLAG_CRC ? 1 : 0,
		    mpa_crc_required, mpa_crc_strict,
		    req->params.bits & MPA_RR_FLAG_MARKERS ? 1 : 0, 0);

	req->params.bits &= ~MPA_RR_FLAG_MARKERS;
	req->params.bits |= MPA_RR_FLAG_REJECT;

	if (!mpa_crc_required && mpa_crc_strict)
		req->params.bits &= ~MPA_RR_FLAG_CRC;

	if (pd_len)
		kfree(cep->mpa.pdata);

	cep->mpa.pdata = NULL;

	siw_send_mpareqrep(cep, NULL, 0);

	return -EOPNOTSUPP;
}

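/*
 * siw_proc_mpareply()
 *
 * Read and validate the MPA Reply on the active side: check version,
 * key and marker/CRC flags and, in enhanced mode, negotiate IRD/ORD
 * and the peer-to-peer RTR mode. On success, move the QP to RTS and
 * hand socket RX/TX over to QP control.
 */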
static int siw_proc_mpareply(struct siw_cep *cep)
{
	struct siw_qp_attrs qp_attrs;
	enum siw_qp_attr_mask qp_attr_mask;
	struct siw_qp *qp = cep->qp;
	struct mpa_rr *rep;
	int rv;
	u16 rep_ord;
	u16 rep_ird;
	bool ird_insufficient = false;
	enum mpa_v2_ctrl mpa_p2p_mode = MPA_V2_RDMA_NO_RTR;

	rv = siw_recv_mpa_rr(cep);
	if (rv)
		goto out_err;

	siw_cancel_mpatimer(cep);

	rep = &cep->mpa.hdr;

	if (__mpa_rr_revision(rep->params.bits) > MPA_REVISION_2) {
		/* allow for 0, 1, and 2 only */
		rv = -EPROTO;
		goto out_err;
	}
	if (memcmp(rep->key, MPA_KEY_REP, 16)) {
		siw_init_terminate(qp, TERM_ERROR_LAYER_LLP, LLP_ETYPE_MPA,
				   LLP_ECODE_INVALID_REQ_RESP, 0);
		siw_send_terminate(qp);
		rv = -EPROTO;
		goto out_err;
	}
	if (rep->params.bits & MPA_RR_FLAG_REJECT) {
		siw_dbg_cep(cep, "got mpa reject\n");
		siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNRESET);

		return -ECONNRESET;
	}
	if (try_gso && rep->params.bits & MPA_RR_FLAG_GSO_EXP) {
		siw_dbg_cep(cep, "peer allows GSO on TX\n");
		qp->tx_ctx.gso_seg_limit = 0;
	}
	if ((rep->params.bits & MPA_RR_FLAG_MARKERS) ||
	    (mpa_crc_required && !(rep->params.bits & MPA_RR_FLAG_CRC)) ||
	    (mpa_crc_strict && !mpa_crc_required &&
	     (rep->params.bits & MPA_RR_FLAG_CRC))) {
		siw_dbg_cep(cep, "reply unsupp: crc %d:%d:%d, m %d:%d\n",
			    rep->params.bits & MPA_RR_FLAG_CRC ? 1 : 0,
			    mpa_crc_required, mpa_crc_strict,
			    rep->params.bits & MPA_RR_FLAG_MARKERS ? 1 : 0, 0);

		siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED);

		return -EINVAL;
	}
	if (cep->enhanced_rdma_conn_est) {
		struct mpa_v2_data *v2;

		if (__mpa_rr_revision(rep->params.bits) < MPA_REVISION_2 ||
		    !(rep->params.bits & MPA_RR_FLAG_ENHANCED)) {
			/*
			 * Protocol failure: The responder MUST reply with
			 * MPA version 2 and MUST set MPA_RR_FLAG_ENHANCED.
			 */
			siw_dbg_cep(cep, "mpa reply error: vers %d, enhcd %d\n",
				    __mpa_rr_revision(rep->params.bits),
				    rep->params.bits & MPA_RR_FLAG_ENHANCED ?
					    1 :
					    0);

			siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
				      -ECONNRESET);
			return -EINVAL;
		}
		v2 = (struct mpa_v2_data *)cep->mpa.pdata;
		rep_ird = ntohs(v2->ird) & MPA_IRD_ORD_MASK;
		rep_ord = ntohs(v2->ord) & MPA_IRD_ORD_MASK;

		if (cep->ird < rep_ord &&
		    (relaxed_ird_negotiation == false ||
		     rep_ord > cep->sdev->attrs.max_ird)) {
			siw_dbg_cep(cep, "ird %d, rep_ord %d, max_ord %d\n",
				    cep->ird, rep_ord,
				    cep->sdev->attrs.max_ord);
			ird_insufficient = true;
		}
		if (cep->ord > rep_ird && relaxed_ird_negotiation == false) {
			siw_dbg_cep(cep, "ord %d, rep_ird %d\n", cep->ord,
				    rep_ird);
			ird_insufficient = true;
		}
		/*
		 * Always report negotiated peer values to user,
		 * even if IRD/ORD negotiation failed
		 */
		cep->ird = rep_ord;
		cep->ord = rep_ird;

		if (ird_insufficient) {
			/*
			 * If the initiator IRD is insufficient for the
			 * responder ORD, send a TERM.
			 */
			siw_init_terminate(qp, TERM_ERROR_LAYER_LLP,
					   LLP_ETYPE_MPA,
					   LLP_ECODE_INSUFFICIENT_IRD, 0);
			siw_send_terminate(qp);
			rv = -ENOMEM;
			goto out_err;
		}
		if (cep->mpa.v2_ctrl_req.ird & MPA_V2_PEER_TO_PEER)
			mpa_p2p_mode =
				cep->mpa.v2_ctrl_req.ord &
				(MPA_V2_RDMA_WRITE_RTR | MPA_V2_RDMA_READ_RTR);

		/*
		 * Check if we requested P2P mode, and if peer agrees
		 */
		if (mpa_p2p_mode != MPA_V2_RDMA_NO_RTR) {
			if ((mpa_p2p_mode & v2->ord) == 0) {
				/*
				 * We requested RTR mode(s), but the peer
				 * did not pick any mode we support.
				 */
				siw_dbg_cep(cep,
					    "rtr mode: req %2x, got %2x\n",
					    mpa_p2p_mode,
					    v2->ord & (MPA_V2_RDMA_WRITE_RTR |
						       MPA_V2_RDMA_READ_RTR));

				siw_init_terminate(qp, TERM_ERROR_LAYER_LLP,
						   LLP_ETYPE_MPA,
						   LLP_ECODE_NO_MATCHING_RTR,
						   0);
				siw_send_terminate(qp);
				rv = -EPROTO;
				goto out_err;
			}
			mpa_p2p_mode = v2->ord & (MPA_V2_RDMA_WRITE_RTR |
						  MPA_V2_RDMA_READ_RTR);
		}
	}
	memset(&qp_attrs, 0, sizeof(qp_attrs));

	if (rep->params.bits & MPA_RR_FLAG_CRC)
		qp_attrs.flags = SIW_MPA_CRC;

	qp_attrs.irq_size = cep->ird;
	qp_attrs.orq_size = cep->ord;
	qp_attrs.sk = cep->sock;
	qp_attrs.state = SIW_QP_STATE_RTS;

	qp_attr_mask = SIW_QP_ATTR_STATE | SIW_QP_ATTR_LLP_HANDLE |
		       SIW_QP_ATTR_ORD | SIW_QP_ATTR_IRD | SIW_QP_ATTR_MPA;

	/* Move socket RX/TX under QP control */
	down_write(&qp->state_lock);
	if (qp->attrs.state > SIW_QP_STATE_RTR) {
		rv = -EINVAL;
		up_write(&qp->state_lock);
		goto out_err;
	}
	rv = siw_qp_modify(qp, &qp_attrs, qp_attr_mask);

	siw_qp_socket_assoc(cep, qp);

	up_write(&qp->state_lock);

	/* Send extra RDMA frame to trigger peer RTS if negotiated */
	if (mpa_p2p_mode != MPA_V2_RDMA_NO_RTR) {
		rv = siw_qp_mpa_rts(qp, mpa_p2p_mode);
		if (rv)
			goto out_err;
	}
	if (!rv) {
		rv = siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, 0);
		if (!rv)
			cep->state = SIW_EPSTATE_RDMA_MODE;

		return 0;
	}

out_err:
	if (rv != -EAGAIN)
		siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);

	return rv;
}

/*
 * siw_accept_newconn - accept an incoming pending connection
 *
 */
static void siw_accept_newconn(struct siw_cep *cep)
{
	struct socket *s = cep->sock;
	struct socket *new_s = NULL;
	struct siw_cep *new_cep = NULL;
	int rv = 0; /* debug only. should disappear */

	if (cep->state != SIW_EPSTATE_LISTENING)
		goto error;

	new_cep = siw_cep_alloc(cep->sdev);
	if (!new_cep)
		goto error;

	/*
	 * 4: Allocate a sufficient number of work elements
	 * to allow concurrent handling of local + peer close
	 * events, MPA header processing + MPA timeout.
	 */
	if (siw_cm_alloc_work(new_cep, 4) != 0)
		goto error;

	/*
	 * Copy saved socket callbacks from listening CEP
	 * and assign new socket with new CEP
	 */
	new_cep->sk_state_change = cep->sk_state_change;
	new_cep->sk_data_ready = cep->sk_data_ready;
	new_cep->sk_write_space = cep->sk_write_space;
	new_cep->sk_error_report = cep->sk_error_report;

	rv = kernel_accept(s, &new_s, O_NONBLOCK);
	if (rv != 0) {
		/*
		 * Connection already aborted by peer..?
		 */
		siw_dbg_cep(cep, "kernel_accept() error: %d\n", rv);
		goto error;
	}
	new_cep->sock = new_s;
	siw_cep_get(new_cep);
	new_s->sk->sk_user_data = new_cep;

	if (siw_tcp_nagle == false)
		tcp_sock_set_nodelay(new_s->sk);
	new_cep->state = SIW_EPSTATE_AWAIT_MPAREQ;

	rv = siw_cm_queue_work(new_cep, SIW_CM_WORK_MPATIMEOUT);
	if (rv)
		goto error;
	/*
	 * See siw_proc_mpareq() etc. for the use of new_cep->listen_cep.
	 */
	new_cep->listen_cep = cep;
	siw_cep_get(cep);

	if (atomic_read(&new_s->sk->sk_rmem_alloc)) {
		/*
		 * MPA REQ already queued
		 */
		siw_dbg_cep(cep, "immediate mpa request\n");

		siw_cep_set_inuse(new_cep);
		rv = siw_proc_mpareq(new_cep);
		if (rv != -EAGAIN) {
			siw_cep_put(cep);
			new_cep->listen_cep = NULL;
			if (rv) {
				siw_cancel_mpatimer(new_cep);
				siw_cep_set_free(new_cep);
				goto error;
			}
		}
		siw_cep_set_free(new_cep);
	}
	return;

error:
	/*
	 * Tear down the socket before dropping the CEP reference,
	 * since the final siw_cep_put() may free new_cep.
	 */
	if (new_s) {
		siw_socket_disassoc(new_s);
		sock_release(new_s);
		new_cep->sock = NULL;
	}
	if (new_cep)
		siw_cep_put(new_cep);

	siw_dbg_cep(cep, "error %d\n", rv);
}

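/*
 * siw_cm_work_handler()
 *
 * Central CM work processing, run from siw_cm_wq. Handles deferred
 * socket events (accept, MPA header receive, local and peer close)
 * and MPA timeouts, and performs final CEP teardown if the work item
 * ends the connection.
 */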
static void siw_cm_work_handler(struct work_struct *w)
{
	struct siw_cm_work *work;
	struct siw_cep *cep;
	int release_cep = 0, rv = 0;

	work = container_of(w, struct siw_cm_work, work.work);
	cep = work->cep;

	siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n",
		    cep->qp ? qp_id(cep->qp) : UINT_MAX,
		    work->type, cep->state);

	siw_cep_set_inuse(cep);

	switch (work->type) {
	case SIW_CM_WORK_ACCEPT:
		siw_accept_newconn(cep);
		break;

	case SIW_CM_WORK_READ_MPAHDR:
		if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) {
			if (cep->listen_cep) {
				siw_cep_set_inuse(cep->listen_cep);

				if (cep->listen_cep->state ==
				    SIW_EPSTATE_LISTENING)
					rv = siw_proc_mpareq(cep);
				else
					rv = -EFAULT;

				siw_cep_set_free(cep->listen_cep);

				if (rv != -EAGAIN) {
					siw_cep_put(cep->listen_cep);
					cep->listen_cep = NULL;
					if (rv)
						siw_cep_put(cep);
				}
			}
		} else if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) {
			rv = siw_proc_mpareply(cep);
		} else {
			/*
			 * CEP already moved out of MPA handshake.
			 * Any connection management is already done.
			 * Silently ignore the MPA packet.
			 */
			if (cep->state == SIW_EPSTATE_RDMA_MODE) {
				cep->sock->sk->sk_data_ready(cep->sock->sk);
				siw_dbg_cep(cep, "already in RDMA mode");
			} else {
				siw_dbg_cep(cep, "out of state: %d\n",
					    cep->state);
			}
		}
		if (rv && rv != -EAGAIN)
			release_cep = 1;
		break;

	case SIW_CM_WORK_CLOSE_LLP:
		/*
		 * QP scheduled LLP close
		 */
		if (cep->qp && cep->qp->term_info.valid)
			siw_send_terminate(cep->qp);

		if (cep->cm_id)
			siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);

		release_cep = 1;
		break;

	case SIW_CM_WORK_PEER_CLOSE:
		if (cep->cm_id) {
			if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) {
				/*
				 * MPA reply not received, but connection drop
				 */
				siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
					      -ECONNRESET);
			} else if (cep->state == SIW_EPSTATE_RDMA_MODE) {
				/*
				 * NOTE: IW_CM_EVENT_DISCONNECT is given just
				 *       to transition IWCM into CLOSING.
				 */
				siw_cm_upcall(cep, IW_CM_EVENT_DISCONNECT, 0);
				siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
			}
			/*
			 * for other states there is no connection
			 * known to the IWCM.
			 */
		} else {
			if (cep->state == SIW_EPSTATE_RECVD_MPAREQ) {
				/*
				 * Wait for the ulp/CM to call accept/reject
				 */
				siw_dbg_cep(cep,
					    "mpa req recvd, wait for ULP\n");
			} else if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) {
				/*
				 * Socket close before MPA request received.
				 */
				if (cep->listen_cep) {
					siw_dbg_cep(cep,
						"no mpareq: drop listener\n");
					siw_cep_put(cep->listen_cep);
					cep->listen_cep = NULL;
				}
			}
		}
		release_cep = 1;
		break;

	case SIW_CM_WORK_MPATIMEOUT:
		cep->mpa_timer = NULL;

		if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) {
			/*
			 * MPA request timed out:
			 * Hide any partially received private data and signal
			 * timeout
			 */
			cep->mpa.hdr.params.pd_len = 0;

			if (cep->cm_id)
				siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
					      -ETIMEDOUT);
			release_cep = 1;

		} else if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) {
			/*
			 * No MPA request received after peer TCP stream setup.
			 */
			if (cep->listen_cep) {
				siw_cep_put(cep->listen_cep);
				cep->listen_cep = NULL;
			}
			release_cep = 1;
		}
		break;

	default:
		WARN(1, "Undefined CM work type: %d\n", work->type);
	}
	if (release_cep) {
		siw_dbg_cep(cep,
			    "release: timer=%s, QP[%u]\n",
			    cep->mpa_timer ? "y" : "n",
			    cep->qp ? qp_id(cep->qp) : UINT_MAX);

		siw_cancel_mpatimer(cep);

		cep->state = SIW_EPSTATE_CLOSED;

		if (cep->qp) {
			struct siw_qp *qp = cep->qp;
			/*
			 * Serialize a potential race with application
			 * closing the QP and calling siw_qp_cm_drop()
			 */
			siw_qp_get(qp);
			siw_cep_set_free(cep);

			siw_qp_llp_close(qp);
			siw_qp_put(qp);

			siw_cep_set_inuse(cep);
			cep->qp = NULL;
			siw_qp_put(qp);
		}
		if (cep->sock) {
			siw_socket_disassoc(cep->sock);
			sock_release(cep->sock);
			cep->sock = NULL;
		}
		if (cep->cm_id) {
			cep->cm_id->rem_ref(cep->cm_id);
			cep->cm_id = NULL;
			siw_cep_put(cep);
		}
	}
	siw_cep_set_free(cep);
	siw_put_work(work);
	siw_cep_put(cep);
}

static struct workqueue_struct *siw_cm_wq;

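/*
 * siw_cm_queue_work()
 *
 * Schedule deferred CM work of the given type on siw_cm_wq.
 * SIW_CM_WORK_MPATIMEOUT arms the MPA timer: the work is delayed by
 * MPAREQ_TIMEOUT (own MPA Request outstanding) or MPAREP_TIMEOUT
 * and cancelled via siw_cancel_mpatimer() if the handshake
 * completes in time.
 */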
int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type)
{
	struct siw_cm_work *work = siw_get_work(cep);
	unsigned long delay = 0;

	if (!work) {
		siw_dbg_cep(cep, "failed with no work available\n");
		return -ENOMEM;
	}
	work->type = type;
	work->cep = cep;

	siw_cep_get(cep);

	INIT_DELAYED_WORK(&work->work, siw_cm_work_handler);

	if (type == SIW_CM_WORK_MPATIMEOUT) {
		cep->mpa_timer = work;

		if (cep->state == SIW_EPSTATE_AWAIT_MPAREP)
			delay = MPAREQ_TIMEOUT;
		else
			delay = MPAREP_TIMEOUT;
	}
	siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n",
		    cep->qp ? qp_id(cep->qp) : -1, type, delay);

	queue_delayed_work(siw_cm_wq, &work->work, delay);

	return 0;
}

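/*
 * siw_cm_llp_data_ready()
 *
 * Socket data_ready upcall during connection setup. Runs in softirq
 * context, so MPA processing (which must run in process context, see
 * siw_recv_mpa_rr()) is deferred to the CM workqueue via
 * SIW_CM_WORK_READ_MPAHDR.
 */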
static void siw_cm_llp_data_ready(struct sock *sk)
{
	struct siw_cep *cep;

	trace_sk_data_ready(sk);

	read_lock(&sk->sk_callback_lock);

	cep = sk_to_cep(sk);
	if (!cep)
		goto out;

	siw_dbg_cep(cep, "cep state: %d, socket state %d\n",
		    cep->state, sk->sk_state);

	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	switch (cep->state) {
	case SIW_EPSTATE_RDMA_MODE:
	case SIW_EPSTATE_LISTENING:
		break;

	case SIW_EPSTATE_AWAIT_MPAREQ:
	case SIW_EPSTATE_AWAIT_MPAREP:
		siw_cm_queue_work(cep, SIW_CM_WORK_READ_MPAHDR);
		break;

	default:
		siw_dbg_cep(cep, "unexpected data, state %d\n", cep->state);
		break;
	}
out:
	read_unlock(&sk->sk_callback_lock);
}

static void siw_cm_llp_write_space(struct sock *sk)
{
	struct siw_cep *cep = sk_to_cep(sk);

	if (cep)
		siw_dbg_cep(cep, "state: %d\n", cep->state);
}

static void siw_cm_llp_error_report(struct sock *sk)
{
	struct siw_cep *cep = sk_to_cep(sk);

	if (cep) {
		siw_dbg_cep(cep, "error %d, socket state: %d, cep state: %d\n",
			    sk->sk_err, sk->sk_state, cep->state);
		cep->sk_error_report(sk);
	}
}

static void siw_cm_llp_state_change(struct sock *sk)
{
	struct siw_cep *cep;
	void (*orig_state_change)(struct sock *s);

	read_lock(&sk->sk_callback_lock);

	cep = sk_to_cep(sk);
	if (!cep) {
		/* endpoint already disassociated */
		read_unlock(&sk->sk_callback_lock);
		return;
	}
	orig_state_change = cep->sk_state_change;

	siw_dbg_cep(cep, "state: %d\n", cep->state);

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		/*
		 * handle accepting socket as special case where only
		 * new connection is possible
		 */
		siw_cm_queue_work(cep, SIW_CM_WORK_ACCEPT);
		break;

	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
		if (cep->qp)
			cep->qp->tx_ctx.tx_suspend = 1;
		siw_cm_queue_work(cep, SIW_CM_WORK_PEER_CLOSE);
		break;

	default:
		siw_dbg_cep(cep, "unexpected socket state %d\n", sk->sk_state);
	}
	read_unlock(&sk->sk_callback_lock);
	orig_state_change(sk);
}

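/*
 * kernel_bindconnect()
 *
 * Bind the socket to the local address and connect to the peer in
 * blocking mode. SO_REUSEADDR is set to allow immediate address
 * reuse; IPV6_V6ONLY is set if the IWCM requests AF-specific binding.
 */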
static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
			      struct sockaddr *raddr, bool afonly)
{
	int rv, flags = 0;
	size_t size = laddr->sa_family == AF_INET ?
		sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);

	/*
	 * Make address available again asap.
	 */
	sock_set_reuseaddr(s->sk);

	if (afonly) {
		rv = ip6_sock_set_v6only(s->sk);
		if (rv)
			return rv;
	}

	rv = s->ops->bind(s, laddr, size);
	if (rv < 0)
		return rv;

	rv = s->ops->connect(s, raddr, size, flags);

	return rv < 0 ? rv : 0;
}

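/*
 * siw_connect - initiate an active (client side) RDMA connection
 *
 * Create and connect a kernel TCP socket, associate it with a new
 * CEP and the user's QP, send the MPA Request and arm the MPA timer.
 * The MPA Reply is processed asynchronously from the CM workqueue
 * (siw_proc_mpareply()).
 */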
int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
{
	struct siw_device *sdev = to_siw_dev(id->device);
	struct siw_qp *qp;
	struct siw_cep *cep = NULL;
	struct socket *s = NULL;
	struct sockaddr *laddr = (struct sockaddr *)&id->local_addr,
			*raddr = (struct sockaddr *)&id->remote_addr;
	bool p2p_mode = peer_to_peer, v4 = true;
	u16 pd_len = params->private_data_len;
	int version = mpa_version, rv;

	if (pd_len > MPA_MAX_PRIVDATA)
		return -EINVAL;

	if (params->ird > sdev->attrs.max_ird ||
	    params->ord > sdev->attrs.max_ord)
		return -ENOMEM;

	if (laddr->sa_family == AF_INET6)
		v4 = false;
	else if (laddr->sa_family != AF_INET)
		return -EAFNOSUPPORT;

	/*
	 * Respect any iwarp port mapping: Use mapped remote address
	 * if valid. Local address must not be mapped, since siw
	 * uses kernel TCP stack.
	 */
	if ((v4 && to_sockaddr_in(id->remote_addr).sin_port != 0) ||
	     to_sockaddr_in6(id->remote_addr).sin6_port != 0)
		raddr = (struct sockaddr *)&id->m_remote_addr;

	qp = siw_qp_id2obj(sdev, params->qpn);
	if (!qp) {
		WARN(1, "[QP %u] does not exist\n", params->qpn);
		rv = -EINVAL;
		goto error;
	}
	siw_dbg_qp(qp, "pd_len %d, laddr %pISp, raddr %pISp\n", pd_len, laddr,
		   raddr);

	rv = sock_create(v4 ? AF_INET : AF_INET6, SOCK_STREAM, IPPROTO_TCP, &s);
	if (rv < 0)
		goto error;

	/*
	 * NOTE: For simplification, connect() is called in blocking
	 * mode. Might be reconsidered for async connection setup at
	 * TCP level.
	 */
	rv = kernel_bindconnect(s, laddr, raddr, id->afonly);
	if (rv != 0) {
		siw_dbg_qp(qp, "kernel_bindconnect: error %d\n", rv);
		goto error;
	}
	if (siw_tcp_nagle == false)
		tcp_sock_set_nodelay(s->sk);
	cep = siw_cep_alloc(sdev);
	if (!cep) {
		rv = -ENOMEM;
		goto error;
	}
	siw_cep_set_inuse(cep);

	/* Associate QP with CEP */
	siw_cep_get(cep);
	qp->cep = cep;

	/* siw_qp_get(qp) already done by QP lookup */
	cep->qp = qp;

	id->add_ref(id);
	cep->cm_id = id;

	/*
	 * 4: Allocate a sufficient number of work elements
	 * to allow concurrent handling of local + peer close
	 * events, MPA header processing + MPA timeout.
	 */
	rv = siw_cm_alloc_work(cep, 4);
	if (rv != 0) {
		rv = -ENOMEM;
		goto error;
	}
	cep->ird = params->ird;
	cep->ord = params->ord;

	if (p2p_mode && cep->ord == 0)
		cep->ord = 1;

	cep->state = SIW_EPSTATE_CONNECTING;

	/*
	 * Associate CEP with socket
	 */
	siw_cep_socket_assoc(cep, s);

	cep->state = SIW_EPSTATE_AWAIT_MPAREP;

	/*
	 * Set MPA Request bits: CRC if required, no MPA Markers,
	 * MPA Rev. according to module parameter 'mpa_version', Key 'Request'.
	 */
	cep->mpa.hdr.params.bits = 0;
	if (version > MPA_REVISION_2) {
		pr_warn("Setting MPA version to %u\n", MPA_REVISION_2);
		version = MPA_REVISION_2;
		/* Adjust also module parameter */
		mpa_version = MPA_REVISION_2;
	}
	__mpa_rr_set_revision(&cep->mpa.hdr.params.bits, version);

	if (try_gso)
		cep->mpa.hdr.params.bits |= MPA_RR_FLAG_GSO_EXP;

	if (mpa_crc_required)
		cep->mpa.hdr.params.bits |= MPA_RR_FLAG_CRC;

	/*
	 * If MPA version == 2:
	 * o Include ORD and IRD.
	 * o Indicate peer-to-peer mode, if required by module
	 *   parameter 'peer_to_peer'.
	 */
	if (version == MPA_REVISION_2) {
		cep->enhanced_rdma_conn_est = true;
		cep->mpa.hdr.params.bits |= MPA_RR_FLAG_ENHANCED;

		cep->mpa.v2_ctrl.ird = htons(cep->ird);
		cep->mpa.v2_ctrl.ord = htons(cep->ord);

		if (p2p_mode) {
			cep->mpa.v2_ctrl.ird |= MPA_V2_PEER_TO_PEER;
			cep->mpa.v2_ctrl.ord |= rtr_type;
		}
		/* Remember own P2P mode requested */
		cep->mpa.v2_ctrl_req.ird = cep->mpa.v2_ctrl.ird;
		cep->mpa.v2_ctrl_req.ord = cep->mpa.v2_ctrl.ord;
	}
	memcpy(cep->mpa.hdr.key, MPA_KEY_REQ, 16);

	rv = siw_send_mpareqrep(cep, params->private_data, pd_len);
	/*
	 * Reset private data.
	 */
	cep->mpa.hdr.params.pd_len = 0;

	if (rv >= 0) {
		rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT);
		if (!rv) {
			siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp));
			siw_cep_set_free(cep);
			return 0;
		}
	}
error:
	siw_dbg(id->device, "failed: %d\n", rv);

	if (cep) {
		siw_socket_disassoc(s);
		sock_release(s);
		cep->sock = NULL;

		cep->qp = NULL;

		cep->cm_id = NULL;
		id->rem_ref(id);

		qp->cep = NULL;
		siw_cep_put(cep);

		cep->state = SIW_EPSTATE_CLOSED;

		siw_cep_set_free(cep);

		siw_cep_put(cep);

	} else if (s) {
		sock_release(s);
	}
	if (qp)
		siw_qp_put(qp);

	return rv;
}

/*
 * siw_accept - Let SoftiWARP accept an RDMA connection request
 *
 * @id:		New connection management id to be used for accepted
 *		connection request
 * @params:	Connection parameters provided by ULP for accepting connection
 *
 * Transition QP to RTS state, associate new CM id @id with accepted CEP
 * and get prepared for TCP input by installing socket callbacks.
 * Then send MPA Reply and generate the "connection established" event.
 * Socket callbacks must be installed before sending MPA Reply, because
 * the latter may cause a first RDMA message to arrive from the RDMA Initiator
 * side very quickly, at which time the socket callbacks must be ready.
 */
int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
{
	struct siw_device *sdev = to_siw_dev(id->device);
	struct siw_cep *cep = (struct siw_cep *)id->provider_data;
	struct siw_qp *qp;
	struct siw_qp_attrs qp_attrs;
	int rv, max_priv_data = MPA_MAX_PRIVDATA;
	bool wait_for_peer_rts = false;

	siw_cep_set_inuse(cep);
	siw_cep_put(cep);

	/* Free lingering inbound private data */
	if (cep->mpa.hdr.params.pd_len) {
		cep->mpa.hdr.params.pd_len = 0;
		kfree(cep->mpa.pdata);
		cep->mpa.pdata = NULL;
	}
	siw_cancel_mpatimer(cep);

	if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
		siw_dbg_cep(cep, "out of state\n");

		siw_cep_set_free(cep);
		siw_cep_put(cep);

		return -ECONNRESET;
	}
	qp = siw_qp_id2obj(sdev, params->qpn);
	if (!qp) {
		WARN(1, "[QP %d] does not exist\n", params->qpn);
		siw_cep_set_free(cep);
		siw_cep_put(cep);

		return -EINVAL;
	}
	down_write(&qp->state_lock);
	if (qp->attrs.state > SIW_QP_STATE_RTR) {
		rv = -EINVAL;
		up_write(&qp->state_lock);
		goto error;
	}
	siw_dbg_cep(cep, "[QP %d]\n", params->qpn);

	if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) {
		siw_dbg_cep(cep, "peer allows GSO on TX\n");
		qp->tx_ctx.gso_seg_limit = 0;
	}
	if (params->ord > sdev->attrs.max_ord ||
	    params->ird > sdev->attrs.max_ird) {
		siw_dbg_cep(
			cep,
			"[QP %u]: ord %d (max %d), ird %d (max %d)\n",
			qp_id(qp), params->ord, sdev->attrs.max_ord,
			params->ird, sdev->attrs.max_ird);
		rv = -EINVAL;
		up_write(&qp->state_lock);
		goto error;
	}
	if (cep->enhanced_rdma_conn_est)
		max_priv_data -= sizeof(struct mpa_v2_data);

	if (params->private_data_len > max_priv_data) {
		siw_dbg_cep(
			cep,
			"[QP %u]: private data length: %d (max %d)\n",
			qp_id(qp), params->private_data_len, max_priv_data);
		rv = -EINVAL;
		up_write(&qp->state_lock);
		goto error;
	}
	if (cep->enhanced_rdma_conn_est) {
		if (params->ord > cep->ord) {
			if (relaxed_ird_negotiation) {
				params->ord = cep->ord;
			} else {
				cep->ird = params->ird;
				cep->ord = params->ord;
				rv = -EINVAL;
				up_write(&qp->state_lock);
				goto error;
			}
		}
		if (params->ird < cep->ird) {
			if (relaxed_ird_negotiation &&
			    cep->ird <= sdev->attrs.max_ird)
				params->ird = cep->ird;
			else {
				rv = -ENOMEM;
				up_write(&qp->state_lock);
				goto error;
			}
		}
		if (cep->mpa.v2_ctrl.ord &
		    (MPA_V2_RDMA_WRITE_RTR | MPA_V2_RDMA_READ_RTR))
			wait_for_peer_rts = true;
		/*
		 * Signal back negotiated IRD and ORD values
		 */
		cep->mpa.v2_ctrl.ord =
			htons(params->ord & MPA_IRD_ORD_MASK) |
			(cep->mpa.v2_ctrl.ord & ~MPA_V2_MASK_IRD_ORD);
		cep->mpa.v2_ctrl.ird =
			htons(params->ird & MPA_IRD_ORD_MASK) |
			(cep->mpa.v2_ctrl.ird & ~MPA_V2_MASK_IRD_ORD);
	}
	cep->ird = params->ird;
	cep->ord = params->ord;

	cep->cm_id = id;
	id->add_ref(id);

	memset(&qp_attrs, 0, sizeof(qp_attrs));
	qp_attrs.orq_size = cep->ord;
	qp_attrs.irq_size = cep->ird;
	qp_attrs.sk = cep->sock;
	if (cep->mpa.hdr.params.bits & MPA_RR_FLAG_CRC)
		qp_attrs.flags = SIW_MPA_CRC;
	qp_attrs.state = SIW_QP_STATE_RTS;

	siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp));

	/* Associate QP with CEP */
	siw_cep_get(cep);
	qp->cep = cep;

	/* siw_qp_get(qp) already done by QP lookup */
	cep->qp = qp;

	cep->state = SIW_EPSTATE_RDMA_MODE;

	/* Move socket RX/TX under QP control */
	rv = siw_qp_modify(qp, &qp_attrs,
			   SIW_QP_ATTR_STATE | SIW_QP_ATTR_LLP_HANDLE |
				   SIW_QP_ATTR_ORD | SIW_QP_ATTR_IRD |
				   SIW_QP_ATTR_MPA);
	up_write(&qp->state_lock);

	if (rv)
		goto error;

	siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n",
		    qp_id(qp), params->private_data_len);

	rv = siw_send_mpareqrep(cep, params->private_data,
				params->private_data_len);
	if (rv != 0)
		goto error;

	if (wait_for_peer_rts) {
		siw_sk_assign_rtr_upcalls(cep);
	} else {
		siw_qp_socket_assoc(cep, qp);
		rv = siw_cm_upcall(cep, IW_CM_EVENT_ESTABLISHED, 0);
		if (rv)
			goto error;
	}
	siw_cep_set_free(cep);

	return 0;
error:
	siw_socket_disassoc(cep->sock);
	sock_release(cep->sock);
	cep->sock = NULL;

	cep->state = SIW_EPSTATE_CLOSED;

	if (cep->cm_id) {
		cep->cm_id->rem_ref(id);
		cep->cm_id = NULL;
	}
	if (qp->cep) {
		siw_cep_put(cep);
		qp->cep = NULL;
	}
	cep->qp = NULL;
	siw_qp_put(qp);

	siw_cep_set_free(cep);
	siw_cep_put(cep);

	return rv;
}

/*
 * siw_reject()
 *
 * Local connection reject case. Send private data back to peer,
 * close connection and dereference connection id.
 */
int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
{
	struct siw_cep *cep = (struct siw_cep *)id->provider_data;

	siw_cep_set_inuse(cep);
	siw_cep_put(cep);

	siw_cancel_mpatimer(cep);

	if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
		siw_dbg_cep(cep, "out of state\n");

		siw_cep_set_free(cep);
		siw_cep_put(cep); /* put last reference */

		return -ECONNRESET;
	}
	siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state,
		    pd_len);

	if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) {
		cep->mpa.hdr.params.bits |= MPA_RR_FLAG_REJECT; /* reject */
		siw_send_mpareqrep(cep, pdata, pd_len);
	}
	siw_socket_disassoc(cep->sock);
	sock_release(cep->sock);
	cep->sock = NULL;

	cep->state = SIW_EPSTATE_CLOSED;

	siw_cep_set_free(cep);
	siw_cep_put(cep);

	return 0;
}

/*
 * siw_create_listen - Create resources for a listener's IWCM ID @id
 *
 * Starts listen on the socket address id->local_addr.
 *
 */
int siw_create_listen(struct iw_cm_id *id, int backlog)
{
	struct socket *s;
	struct siw_cep *cep = NULL;
	struct siw_device *sdev = to_siw_dev(id->device);
	int addr_family = id->local_addr.ss_family;
	int rv = 0;

	if (addr_family != AF_INET && addr_family != AF_INET6)
		return -EAFNOSUPPORT;

	rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
	if (rv < 0)
		return rv;

	/*
	 * Allow binding local port when still in TIME_WAIT from last close.
	 */
	sock_set_reuseaddr(s->sk);

	if (addr_family == AF_INET) {
		struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);

		/* For wildcard addr, limit binding to current device only */
		if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;

		rv = s->ops->bind(s, (struct sockaddr *)laddr,
				  sizeof(struct sockaddr_in));
	} else {
		struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);

		if (id->afonly) {
			rv = ip6_sock_set_v6only(s->sk);
			if (rv) {
				siw_dbg(id->device,
					"ip6_sock_set_v6only error: %d\n", rv);
				goto error;
			}
		}

		/* For wildcard addr, limit binding to current device only */
		if (ipv6_addr_any(&laddr->sin6_addr))
			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;

		rv = s->ops->bind(s, (struct sockaddr *)laddr,
				  sizeof(struct sockaddr_in6));
	}
	if (rv) {
		siw_dbg(id->device, "socket bind error: %d\n", rv);
		goto error;
	}
	cep = siw_cep_alloc(sdev);
	if (!cep) {
		rv = -ENOMEM;
		goto error;
	}
	siw_cep_socket_assoc(cep, s);

	rv = siw_cm_alloc_work(cep, backlog);
	if (rv) {
		siw_dbg(id->device,
			"alloc_work error %d, backlog %d\n",
			rv, backlog);
		goto error;
	}
	rv = s->ops->listen(s, backlog);
	if (rv) {
		siw_dbg(id->device, "listen error %d\n", rv);
		goto error;
	}
	cep->cm_id = id;
	id->add_ref(id);

	/*
	 * In case of a wildcard rdma_listen on a multi-homed device,
	 * a listener's IWCM id is associated with more than one listening CEP.
	 *
	 * We currently use id->provider_data in three different ways:
	 *
	 * o For a listener's IWCM id, id->provider_data points to
	 *   the list_head of the list of listening CEPs.
	 *   Uses: siw_create_listen(), siw_destroy_listen()
	 *
	 * o For each accepted passive-side IWCM id, id->provider_data
	 *   points to the CEP itself. This is a consequence of
	 *   - siw_cm_upcall() setting event.provider_data = cep and
	 *   - the IWCM's cm_conn_req_handler() setting provider_data of the
	 *     new passive-side IWCM id equal to event.provider_data
	 *   Uses: siw_accept(), siw_reject()
	 *
	 * o For an active-side IWCM id, id->provider_data is not used at all.
	 *
	 */
	if (!id->provider_data) {
		id->provider_data =
			kmalloc(sizeof(struct list_head), GFP_KERNEL);
		if (!id->provider_data) {
			rv = -ENOMEM;
			goto error;
		}
		INIT_LIST_HEAD((struct list_head *)id->provider_data);
	}
	list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
	cep->state = SIW_EPSTATE_LISTENING;

	siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);

	return 0;

error:
	siw_dbg(id->device, "failed: %d\n", rv);

	if (cep) {
		siw_cep_set_inuse(cep);

		if (cep->cm_id) {
			cep->cm_id->rem_ref(cep->cm_id);
			cep->cm_id = NULL;
		}
		cep->sock = NULL;
		siw_socket_disassoc(s);
		cep->state = SIW_EPSTATE_CLOSED;

		siw_cep_set_free(cep);
		siw_cep_put(cep);
	}
	sock_release(s);

	return rv;
}

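/*
 * siw_drop_listeners()
 *
 * Close and release all listening CEPs attached to a listener's
 * IWCM id (more than one in case of a wildcard listen on a
 * multi-homed device).
 */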
static void siw_drop_listeners(struct iw_cm_id *id)
{
	struct list_head *p, *tmp;

	/*
	 * In case of a wildcard rdma_listen on a multi-homed device,
	 * a listener's IWCM id is associated with more than one listening CEP.
	 */
	list_for_each_safe(p, tmp, (struct list_head *)id->provider_data) {
		struct siw_cep *cep = list_entry(p, struct siw_cep, listenq);

		list_del(p);

		siw_dbg_cep(cep, "drop cep, state %d\n", cep->state);

		siw_cep_set_inuse(cep);

		if (cep->cm_id) {
			cep->cm_id->rem_ref(cep->cm_id);
			cep->cm_id = NULL;
		}
		if (cep->sock) {
			siw_socket_disassoc(cep->sock);
			sock_release(cep->sock);
			cep->sock = NULL;
		}
		cep->state = SIW_EPSTATE_CLOSED;
		siw_cep_set_free(cep);
		siw_cep_put(cep);
	}
}

int siw_destroy_listen(struct iw_cm_id *id)
{
	if (!id->provider_data) {
		siw_dbg(id->device, "no cep(s)\n");
		return 0;
	}
	siw_drop_listeners(id);
	kfree(id->provider_data);
	id->provider_data = NULL;

	return 0;
}

int siw_cm_init(void)
{
	/*
	 * Use a single-threaded workqueue to keep strict ordering
	 * of CM work items.
	 */
	siw_cm_wq = create_singlethread_workqueue("siw_cm_wq");
	if (!siw_cm_wq)
		return -ENOMEM;

	return 0;
}

void siw_cm_exit(void)
{
	if (siw_cm_wq)
		destroy_workqueue(siw_cm_wq);
}