1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 static void l2cap_retrans_timeout(struct work_struct *work);
65 static void l2cap_monitor_timeout(struct work_struct *work);
66 static void l2cap_ack_timeout(struct work_struct *work);
67 
68 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 {
70 	if (link_type == LE_LINK) {
71 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
72 			return BDADDR_LE_PUBLIC;
73 		else
74 			return BDADDR_LE_RANDOM;
75 	}
76 
77 	return BDADDR_BREDR;
78 }
79 
80 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
81 {
82 	return bdaddr_type(hcon->type, hcon->src_type);
83 }
84 
85 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
86 {
87 	return bdaddr_type(hcon->type, hcon->dst_type);
88 }
89 
90 /* ---- L2CAP channels ---- */
91 
92 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
93 						   u16 cid)
94 {
95 	struct l2cap_chan *c;
96 
97 	list_for_each_entry(c, &conn->chan_l, list) {
98 		if (c->dcid == cid)
99 			return c;
100 	}
101 	return NULL;
102 }
103 
104 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 						   u16 cid)
106 {
107 	struct l2cap_chan *c;
108 
109 	list_for_each_entry(c, &conn->chan_l, list) {
110 		if (c->scid == cid)
111 			return c;
112 	}
113 	return NULL;
114 }
115 
116 /* Find channel with given SCID.
117  * Returns a reference locked channel.
118  */
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
120 						 u16 cid)
121 {
122 	struct l2cap_chan *c;
123 
124 	mutex_lock(&conn->chan_lock);
125 	c = __l2cap_get_chan_by_scid(conn, cid);
126 	if (c) {
127 		/* Only lock if chan reference is not 0 */
128 		c = l2cap_chan_hold_unless_zero(c);
129 		if (c)
130 			l2cap_chan_lock(c);
131 	}
132 	mutex_unlock(&conn->chan_lock);
133 
134 	return c;
135 }
136 
137 /* Find channel with given DCID.
138  * Returns a reference locked channel.
139  */
140 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
141 						 u16 cid)
142 {
143 	struct l2cap_chan *c;
144 
145 	mutex_lock(&conn->chan_lock);
146 	c = __l2cap_get_chan_by_dcid(conn, cid);
147 	if (c) {
148 		/* Only lock if chan reference is not 0 */
149 		c = l2cap_chan_hold_unless_zero(c);
150 		if (c)
151 			l2cap_chan_lock(c);
152 	}
153 	mutex_unlock(&conn->chan_lock);
154 
155 	return c;
156 }
157 
158 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
159 						    u8 ident)
160 {
161 	struct l2cap_chan *c;
162 
163 	list_for_each_entry(c, &conn->chan_l, list) {
164 		if (c->ident == ident)
165 			return c;
166 	}
167 	return NULL;
168 }
169 
170 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
171 						  u8 ident)
172 {
173 	struct l2cap_chan *c;
174 
175 	mutex_lock(&conn->chan_lock);
176 	c = __l2cap_get_chan_by_ident(conn, ident);
177 	if (c) {
178 		/* Only lock if chan reference is not 0 */
179 		c = l2cap_chan_hold_unless_zero(c);
180 		if (c)
181 			l2cap_chan_lock(c);
182 	}
183 	mutex_unlock(&conn->chan_lock);
184 
185 	return c;
186 }
187 
188 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
189 						      u8 src_type)
190 {
191 	struct l2cap_chan *c;
192 
193 	list_for_each_entry(c, &chan_list, global_l) {
194 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
195 			continue;
196 
197 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
198 			continue;
199 
200 		if (c->sport == psm && !bacmp(&c->src, src))
201 			return c;
202 	}
203 	return NULL;
204 }
205 
206 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
207 {
208 	int err;
209 
210 	write_lock(&chan_list_lock);
211 
212 	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
213 		err = -EADDRINUSE;
214 		goto done;
215 	}
216 
217 	if (psm) {
218 		chan->psm = psm;
219 		chan->sport = psm;
220 		err = 0;
221 	} else {
222 		u16 p, start, end, incr;
223 
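		/* Note (added for clarity, not in the original source): BR/EDR
		 * PSMs are only valid when the least significant bit of their
		 * least significant octet is set, hence the increment of 2
		 * below; LE dynamic PSMs carry no such constraint.
		 */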
224 		if (chan->src_type == BDADDR_BREDR) {
225 			start = L2CAP_PSM_DYN_START;
226 			end = L2CAP_PSM_AUTO_END;
227 			incr = 2;
228 		} else {
229 			start = L2CAP_PSM_LE_DYN_START;
230 			end = L2CAP_PSM_LE_DYN_END;
231 			incr = 1;
232 		}
233 
234 		err = -EINVAL;
235 		for (p = start; p <= end; p += incr)
236 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
237 							 chan->src_type)) {
238 				chan->psm   = cpu_to_le16(p);
239 				chan->sport = cpu_to_le16(p);
240 				err = 0;
241 				break;
242 			}
243 	}
244 
245 done:
246 	write_unlock(&chan_list_lock);
247 	return err;
248 }
249 EXPORT_SYMBOL_GPL(l2cap_add_psm);
250 
251 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
252 {
253 	write_lock(&chan_list_lock);
254 
255 	/* Override the defaults (which are for conn-oriented) */
256 	chan->omtu = L2CAP_DEFAULT_MTU;
257 	chan->chan_type = L2CAP_CHAN_FIXED;
258 
259 	chan->scid = scid;
260 
261 	write_unlock(&chan_list_lock);
262 
263 	return 0;
264 }
265 
266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
267 {
268 	u16 cid, dyn_end;
269 
270 	if (conn->hcon->type == LE_LINK)
271 		dyn_end = L2CAP_CID_LE_DYN_END;
272 	else
273 		dyn_end = L2CAP_CID_DYN_END;
274 
275 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 		if (!__l2cap_get_chan_by_scid(conn, cid))
277 			return cid;
278 	}
279 
280 	return 0;
281 }
282 
283 static void l2cap_state_change(struct l2cap_chan *chan, int state)
284 {
285 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
286 	       state_to_string(state));
287 
288 	chan->state = state;
289 	chan->ops->state_change(chan, state, 0);
290 }
291 
292 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
293 						int state, int err)
294 {
295 	chan->state = state;
296 	chan->ops->state_change(chan, chan->state, err);
297 }
298 
299 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
300 {
301 	chan->ops->state_change(chan, chan->state, err);
302 }
303 
304 static void __set_retrans_timer(struct l2cap_chan *chan)
305 {
306 	if (!delayed_work_pending(&chan->monitor_timer) &&
307 	    chan->retrans_timeout) {
308 		l2cap_set_timer(chan, &chan->retrans_timer,
309 				msecs_to_jiffies(chan->retrans_timeout));
310 	}
311 }
312 
313 static void __set_monitor_timer(struct l2cap_chan *chan)
314 {
315 	__clear_retrans_timer(chan);
316 	if (chan->monitor_timeout) {
317 		l2cap_set_timer(chan, &chan->monitor_timer,
318 				msecs_to_jiffies(chan->monitor_timeout));
319 	}
320 }
321 
322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
323 					       u16 seq)
324 {
325 	struct sk_buff *skb;
326 
327 	skb_queue_walk(head, skb) {
328 		if (bt_cb(skb)->l2cap.txseq == seq)
329 			return skb;
330 	}
331 
332 	return NULL;
333 }
334 
335 /* ---- L2CAP sequence number lists ---- */
336 
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338  * SREJ requests that are received and for frames that are to be
339  * retransmitted. These seq_list functions implement a singly-linked
340  * list in an array, where membership in the list can also be checked
341  * in constant time. Items can also be added to the tail of the list
342  * and removed from the head in constant time, without further memory
343  * allocs or frees.
344  */
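
/* Illustrative example (not part of the original source): with size 5 the
 * backing array is rounded up to 8 entries (mask 0x7). Appending txseq 1, 3
 * and 6 gives head = 1, list[1] = 3, list[3] = 6, list[6] = L2CAP_SEQ_LIST_TAIL
 * and tail = 6; popping then returns 1, 3 and 6 in order, each in constant
 * time, while l2cap_seq_list_contains() remains a single array lookup.
 */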
345 
346 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
347 {
348 	size_t alloc_size, i;
349 
350 	/* Allocated size is a power of 2 to map sequence numbers
351 	 * (which may be up to 14 bits) into a smaller array that is
352 	 * sized for the negotiated ERTM transmit windows.
353 	 */
354 	alloc_size = roundup_pow_of_two(size);
355 
356 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
357 	if (!seq_list->list)
358 		return -ENOMEM;
359 
360 	seq_list->mask = alloc_size - 1;
361 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 	for (i = 0; i < alloc_size; i++)
364 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
365 
366 	return 0;
367 }
368 
369 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
370 {
371 	kfree(seq_list->list);
372 }
373 
374 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
375 					   u16 seq)
376 {
377 	/* Constant-time check for list membership */
378 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
379 }
380 
381 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
382 {
383 	u16 seq = seq_list->head;
384 	u16 mask = seq_list->mask;
385 
386 	seq_list->head = seq_list->list[seq & mask];
387 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
388 
389 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
390 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
391 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
392 	}
393 
394 	return seq;
395 }
396 
397 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
398 {
399 	u16 i;
400 
401 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
402 		return;
403 
404 	for (i = 0; i <= seq_list->mask; i++)
405 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
406 
407 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
408 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
409 }
410 
411 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
412 {
413 	u16 mask = seq_list->mask;
414 
415 	/* All appends happen in constant time */
416 
417 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
418 		return;
419 
420 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
421 		seq_list->head = seq;
422 	else
423 		seq_list->list[seq_list->tail & mask] = seq;
424 
425 	seq_list->tail = seq;
426 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
427 }
428 
429 static void l2cap_chan_timeout(struct work_struct *work)
430 {
431 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
432 					       chan_timer.work);
433 	struct l2cap_conn *conn = chan->conn;
434 	int reason;
435 
436 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
437 
438 	mutex_lock(&conn->chan_lock);
439 	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
440 	 * this work. No need to call l2cap_chan_hold(chan) here again.
441 	 */
442 	l2cap_chan_lock(chan);
443 
444 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
445 		reason = ECONNREFUSED;
446 	else if (chan->state == BT_CONNECT &&
447 		 chan->sec_level != BT_SECURITY_SDP)
448 		reason = ECONNREFUSED;
449 	else
450 		reason = ETIMEDOUT;
451 
452 	l2cap_chan_close(chan, reason);
453 
454 	chan->ops->close(chan);
455 
456 	l2cap_chan_unlock(chan);
457 	l2cap_chan_put(chan);
458 
459 	mutex_unlock(&conn->chan_lock);
460 }
461 
462 struct l2cap_chan *l2cap_chan_create(void)
463 {
464 	struct l2cap_chan *chan;
465 
466 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
467 	if (!chan)
468 		return NULL;
469 
470 	skb_queue_head_init(&chan->tx_q);
471 	skb_queue_head_init(&chan->srej_q);
472 	mutex_init(&chan->lock);
473 
474 	/* Set default lock nesting level */
475 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
476 
477 	write_lock(&chan_list_lock);
478 	list_add(&chan->global_l, &chan_list);
479 	write_unlock(&chan_list_lock);
480 
481 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
482 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
483 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
484 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
485 
486 	chan->state = BT_OPEN;
487 
488 	kref_init(&chan->kref);
489 
490 	/* This flag is cleared in l2cap_chan_ready() */
491 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
492 
493 	BT_DBG("chan %p", chan);
494 
495 	return chan;
496 }
497 EXPORT_SYMBOL_GPL(l2cap_chan_create);
498 
499 static void l2cap_chan_destroy(struct kref *kref)
500 {
501 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
502 
503 	BT_DBG("chan %p", chan);
504 
505 	write_lock(&chan_list_lock);
506 	list_del(&chan->global_l);
507 	write_unlock(&chan_list_lock);
508 
509 	kfree(chan);
510 }
511 
512 void l2cap_chan_hold(struct l2cap_chan *c)
513 {
514 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
515 
516 	kref_get(&c->kref);
517 }
518 
519 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
520 {
521 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
522 
523 	if (!kref_get_unless_zero(&c->kref))
524 		return NULL;
525 
526 	return c;
527 }
528 
529 void l2cap_chan_put(struct l2cap_chan *c)
530 {
531 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
532 
533 	kref_put(&c->kref, l2cap_chan_destroy);
534 }
535 EXPORT_SYMBOL_GPL(l2cap_chan_put);
536 
537 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
538 {
539 	chan->fcs  = L2CAP_FCS_CRC16;
540 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
541 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
542 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
543 	chan->remote_max_tx = chan->max_tx;
544 	chan->remote_tx_win = chan->tx_win;
545 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
546 	chan->sec_level = BT_SECURITY_LOW;
547 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
548 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
549 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
550 
551 	chan->conf_state = 0;
552 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
553 
554 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
555 }
556 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
557 
558 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
559 {
560 	chan->sdu = NULL;
561 	chan->sdu_last_frag = NULL;
562 	chan->sdu_len = 0;
563 	chan->tx_credits = tx_credits;
564 	/* Derive MPS from connection MTU to stop HCI fragmentation */
565 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
566 	/* Give enough credits for a full packet */
567 	chan->rx_credits = (chan->imtu / chan->mps) + 1;
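	/* Illustrative example (not in the original source): imtu 512 with a
	 * connection MTU of 251 gives mps = min(512, 251 - 4) = 247 and
	 * rx_credits = 512 / 247 + 1 = 3, enough for one full incoming SDU.
	 */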
568 
569 	skb_queue_head_init(&chan->tx_q);
570 }
571 
572 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
573 {
574 	l2cap_le_flowctl_init(chan, tx_credits);
575 
576 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
577 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
578 		chan->mps = L2CAP_ECRED_MIN_MPS;
579 		chan->rx_credits = (chan->imtu / chan->mps) + 1;
580 	}
581 }
582 
583 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
584 {
585 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
586 	       __le16_to_cpu(chan->psm), chan->dcid);
587 
588 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
589 
590 	chan->conn = conn;
591 
592 	switch (chan->chan_type) {
593 	case L2CAP_CHAN_CONN_ORIENTED:
594 		/* Alloc CID for connection-oriented socket */
595 		chan->scid = l2cap_alloc_cid(conn);
596 		if (conn->hcon->type == ACL_LINK)
597 			chan->omtu = L2CAP_DEFAULT_MTU;
598 		break;
599 
600 	case L2CAP_CHAN_CONN_LESS:
601 		/* Connectionless socket */
602 		chan->scid = L2CAP_CID_CONN_LESS;
603 		chan->dcid = L2CAP_CID_CONN_LESS;
604 		chan->omtu = L2CAP_DEFAULT_MTU;
605 		break;
606 
607 	case L2CAP_CHAN_FIXED:
608 		/* Caller will set CID and CID specific MTU values */
609 		break;
610 
611 	default:
612 		/* Raw socket can send/recv signalling messages only */
613 		chan->scid = L2CAP_CID_SIGNALING;
614 		chan->dcid = L2CAP_CID_SIGNALING;
615 		chan->omtu = L2CAP_DEFAULT_MTU;
616 	}
617 
618 	chan->local_id		= L2CAP_BESTEFFORT_ID;
619 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
620 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
621 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
622 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
623 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
624 
625 	l2cap_chan_hold(chan);
626 
627 	/* Only keep a reference for fixed channels if they requested it */
628 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
629 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
630 		hci_conn_hold(conn->hcon);
631 
632 	list_add(&chan->list, &conn->chan_l);
633 }
634 
635 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
636 {
637 	mutex_lock(&conn->chan_lock);
638 	__l2cap_chan_add(conn, chan);
639 	mutex_unlock(&conn->chan_lock);
640 }
641 
642 void l2cap_chan_del(struct l2cap_chan *chan, int err)
643 {
644 	struct l2cap_conn *conn = chan->conn;
645 
646 	__clear_chan_timer(chan);
647 
648 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
649 	       state_to_string(chan->state));
650 
651 	chan->ops->teardown(chan, err);
652 
653 	if (conn) {
654 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
655 		/* Delete from channel list */
656 		list_del(&chan->list);
657 
658 		l2cap_chan_put(chan);
659 
660 		chan->conn = NULL;
661 
662 		/* Reference was only held for non-fixed channels or
663 		 * fixed channels that explicitly requested it using the
664 		 * FLAG_HOLD_HCI_CONN flag.
665 		 */
666 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
667 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
668 			hci_conn_drop(conn->hcon);
669 
670 		if (mgr && mgr->bredr_chan == chan)
671 			mgr->bredr_chan = NULL;
672 	}
673 
674 	if (chan->hs_hchan) {
675 		struct hci_chan *hs_hchan = chan->hs_hchan;
676 
677 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
678 		amp_disconnect_logical_link(hs_hchan);
679 	}
680 
681 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
682 		return;
683 
684 	switch (chan->mode) {
685 	case L2CAP_MODE_BASIC:
686 		break;
687 
688 	case L2CAP_MODE_LE_FLOWCTL:
689 	case L2CAP_MODE_EXT_FLOWCTL:
690 		skb_queue_purge(&chan->tx_q);
691 		break;
692 
693 	case L2CAP_MODE_ERTM:
694 		__clear_retrans_timer(chan);
695 		__clear_monitor_timer(chan);
696 		__clear_ack_timer(chan);
697 
698 		skb_queue_purge(&chan->srej_q);
699 
700 		l2cap_seq_list_free(&chan->srej_list);
701 		l2cap_seq_list_free(&chan->retrans_list);
702 		fallthrough;
703 
704 	case L2CAP_MODE_STREAMING:
705 		skb_queue_purge(&chan->tx_q);
706 		break;
707 	}
708 }
709 EXPORT_SYMBOL_GPL(l2cap_chan_del);
710 
711 static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
712 				 l2cap_chan_func_t func, void *data)
713 {
714 	struct l2cap_chan *chan, *l;
715 
716 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
717 		if (chan->ident == id)
718 			func(chan, data);
719 	}
720 }
721 
722 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
723 			      void *data)
724 {
725 	struct l2cap_chan *chan;
726 
727 	list_for_each_entry(chan, &conn->chan_l, list) {
728 		func(chan, data);
729 	}
730 }
731 
732 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
733 		     void *data)
734 {
735 	if (!conn)
736 		return;
737 
738 	mutex_lock(&conn->chan_lock);
739 	__l2cap_chan_list(conn, func, data);
740 	mutex_unlock(&conn->chan_lock);
741 }
742 
743 EXPORT_SYMBOL_GPL(l2cap_chan_list);
744 
745 static void l2cap_conn_update_id_addr(struct work_struct *work)
746 {
747 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
748 					       id_addr_timer.work);
749 	struct hci_conn *hcon = conn->hcon;
750 	struct l2cap_chan *chan;
751 
752 	mutex_lock(&conn->chan_lock);
753 
754 	list_for_each_entry(chan, &conn->chan_l, list) {
755 		l2cap_chan_lock(chan);
756 		bacpy(&chan->dst, &hcon->dst);
757 		chan->dst_type = bdaddr_dst_type(hcon);
758 		l2cap_chan_unlock(chan);
759 	}
760 
761 	mutex_unlock(&conn->chan_lock);
762 }
763 
764 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
765 {
766 	struct l2cap_conn *conn = chan->conn;
767 	struct l2cap_le_conn_rsp rsp;
768 	u16 result;
769 
770 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
771 		result = L2CAP_CR_LE_AUTHORIZATION;
772 	else
773 		result = L2CAP_CR_LE_BAD_PSM;
774 
775 	l2cap_state_change(chan, BT_DISCONN);
776 
777 	rsp.dcid    = cpu_to_le16(chan->scid);
778 	rsp.mtu     = cpu_to_le16(chan->imtu);
779 	rsp.mps     = cpu_to_le16(chan->mps);
780 	rsp.credits = cpu_to_le16(chan->rx_credits);
781 	rsp.result  = cpu_to_le16(result);
782 
783 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
784 		       &rsp);
785 }
786 
787 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
788 {
789 	l2cap_state_change(chan, BT_DISCONN);
790 
791 	__l2cap_ecred_conn_rsp_defer(chan);
792 }
793 
794 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
795 {
796 	struct l2cap_conn *conn = chan->conn;
797 	struct l2cap_conn_rsp rsp;
798 	u16 result;
799 
800 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
801 		result = L2CAP_CR_SEC_BLOCK;
802 	else
803 		result = L2CAP_CR_BAD_PSM;
804 
805 	l2cap_state_change(chan, BT_DISCONN);
806 
807 	rsp.scid   = cpu_to_le16(chan->dcid);
808 	rsp.dcid   = cpu_to_le16(chan->scid);
809 	rsp.result = cpu_to_le16(result);
810 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
811 
812 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
813 }
814 
815 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
816 {
817 	struct l2cap_conn *conn = chan->conn;
818 
819 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
820 
821 	switch (chan->state) {
822 	case BT_LISTEN:
823 		chan->ops->teardown(chan, 0);
824 		break;
825 
826 	case BT_CONNECTED:
827 	case BT_CONFIG:
828 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
829 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
830 			l2cap_send_disconn_req(chan, reason);
831 		} else
832 			l2cap_chan_del(chan, reason);
833 		break;
834 
835 	case BT_CONNECT2:
836 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
837 			if (conn->hcon->type == ACL_LINK)
838 				l2cap_chan_connect_reject(chan);
839 			else if (conn->hcon->type == LE_LINK) {
840 				switch (chan->mode) {
841 				case L2CAP_MODE_LE_FLOWCTL:
842 					l2cap_chan_le_connect_reject(chan);
843 					break;
844 				case L2CAP_MODE_EXT_FLOWCTL:
845 					l2cap_chan_ecred_connect_reject(chan);
846 					return;
847 				}
848 			}
849 		}
850 
851 		l2cap_chan_del(chan, reason);
852 		break;
853 
854 	case BT_CONNECT:
855 	case BT_DISCONN:
856 		l2cap_chan_del(chan, reason);
857 		break;
858 
859 	default:
860 		chan->ops->teardown(chan, 0);
861 		break;
862 	}
863 }
864 EXPORT_SYMBOL(l2cap_chan_close);
865 
866 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
867 {
868 	switch (chan->chan_type) {
869 	case L2CAP_CHAN_RAW:
870 		switch (chan->sec_level) {
871 		case BT_SECURITY_HIGH:
872 		case BT_SECURITY_FIPS:
873 			return HCI_AT_DEDICATED_BONDING_MITM;
874 		case BT_SECURITY_MEDIUM:
875 			return HCI_AT_DEDICATED_BONDING;
876 		default:
877 			return HCI_AT_NO_BONDING;
878 		}
879 		break;
880 	case L2CAP_CHAN_CONN_LESS:
881 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
882 			if (chan->sec_level == BT_SECURITY_LOW)
883 				chan->sec_level = BT_SECURITY_SDP;
884 		}
885 		if (chan->sec_level == BT_SECURITY_HIGH ||
886 		    chan->sec_level == BT_SECURITY_FIPS)
887 			return HCI_AT_NO_BONDING_MITM;
888 		else
889 			return HCI_AT_NO_BONDING;
890 		break;
891 	case L2CAP_CHAN_CONN_ORIENTED:
892 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
893 			if (chan->sec_level == BT_SECURITY_LOW)
894 				chan->sec_level = BT_SECURITY_SDP;
895 
896 			if (chan->sec_level == BT_SECURITY_HIGH ||
897 			    chan->sec_level == BT_SECURITY_FIPS)
898 				return HCI_AT_NO_BONDING_MITM;
899 			else
900 				return HCI_AT_NO_BONDING;
901 		}
902 		fallthrough;
903 
904 	default:
905 		switch (chan->sec_level) {
906 		case BT_SECURITY_HIGH:
907 		case BT_SECURITY_FIPS:
908 			return HCI_AT_GENERAL_BONDING_MITM;
909 		case BT_SECURITY_MEDIUM:
910 			return HCI_AT_GENERAL_BONDING;
911 		default:
912 			return HCI_AT_NO_BONDING;
913 		}
914 		break;
915 	}
916 }
917 
918 /* Service level security */
919 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
920 {
921 	struct l2cap_conn *conn = chan->conn;
922 	__u8 auth_type;
923 
924 	if (conn->hcon->type == LE_LINK)
925 		return smp_conn_security(conn->hcon, chan->sec_level);
926 
927 	auth_type = l2cap_get_auth_type(chan);
928 
929 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
930 				 initiator);
931 }
932 
933 static u8 l2cap_get_ident(struct l2cap_conn *conn)
934 {
935 	u8 id;
936 
937 	/* Get the next available identifier.
938 	 *    1 - 128 are used by kernel.
939 	 *  129 - 199 are reserved.
940 	 *  200 - 254 are used by utilities like l2ping, etc.
941 	 */
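
	/* Illustrative note (not in the original source): once tx_ident
	 * reaches 128 the next allocation wraps back to 1, so ident 0 is
	 * never handed out by the kernel.
	 */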
942 
943 	mutex_lock(&conn->ident_lock);
944 
945 	if (++conn->tx_ident > 128)
946 		conn->tx_ident = 1;
947 
948 	id = conn->tx_ident;
949 
950 	mutex_unlock(&conn->ident_lock);
951 
952 	return id;
953 }
954 
955 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
956 			   void *data)
957 {
958 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
959 	u8 flags;
960 
961 	BT_DBG("code 0x%2.2x", code);
962 
963 	if (!skb)
964 		return;
965 
966 	/* Use NO_FLUSH if supported or we have an LE link (which does
967 	 * not support auto-flushing packets) */
968 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
969 	    conn->hcon->type == LE_LINK)
970 		flags = ACL_START_NO_FLUSH;
971 	else
972 		flags = ACL_START;
973 
974 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
975 	skb->priority = HCI_PRIO_MAX;
976 
977 	hci_send_acl(conn->hchan, skb, flags);
978 }
979 
980 static bool __chan_is_moving(struct l2cap_chan *chan)
981 {
982 	return chan->move_state != L2CAP_MOVE_STABLE &&
983 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
984 }
985 
986 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
987 {
988 	struct hci_conn *hcon = chan->conn->hcon;
989 	u16 flags;
990 
991 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
992 	       skb->priority);
993 
994 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
995 		if (chan->hs_hchan)
996 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
997 		else
998 			kfree_skb(skb);
999 
1000 		return;
1001 	}
1002 
1003 	/* Use NO_FLUSH for LE links (where this is the only option) or
1004 	 * if the BR/EDR link supports it and flushing has not been
1005 	 * explicitly requested (through FLAG_FLUSHABLE).
1006 	 */
1007 	if (hcon->type == LE_LINK ||
1008 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1009 	     lmp_no_flush_capable(hcon->hdev)))
1010 		flags = ACL_START_NO_FLUSH;
1011 	else
1012 		flags = ACL_START;
1013 
1014 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1015 	hci_send_acl(chan->conn->hchan, skb, flags);
1016 }
1017 
1018 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1019 {
1020 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1021 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1022 
1023 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1024 		/* S-Frame */
1025 		control->sframe = 1;
1026 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1027 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1028 
1029 		control->sar = 0;
1030 		control->txseq = 0;
1031 	} else {
1032 		/* I-Frame */
1033 		control->sframe = 0;
1034 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1035 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1036 
1037 		control->poll = 0;
1038 		control->super = 0;
1039 	}
1040 }
1041 
1042 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1043 {
1044 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1045 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1046 
1047 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1048 		/* S-Frame */
1049 		control->sframe = 1;
1050 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1051 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1052 
1053 		control->sar = 0;
1054 		control->txseq = 0;
1055 	} else {
1056 		/* I-Frame */
1057 		control->sframe = 0;
1058 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1059 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1060 
1061 		control->poll = 0;
1062 		control->super = 0;
1063 	}
1064 }
1065 
1066 static inline void __unpack_control(struct l2cap_chan *chan,
1067 				    struct sk_buff *skb)
1068 {
1069 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1070 		__unpack_extended_control(get_unaligned_le32(skb->data),
1071 					  &bt_cb(skb)->l2cap);
1072 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1073 	} else {
1074 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1075 					  &bt_cb(skb)->l2cap);
1076 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1077 	}
1078 }
1079 
1080 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1081 {
1082 	u32 packed;
1083 
1084 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1085 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1086 
1087 	if (control->sframe) {
1088 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1089 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1090 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1091 	} else {
1092 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1093 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1094 	}
1095 
1096 	return packed;
1097 }
1098 
1099 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1100 {
1101 	u16 packed;
1102 
1103 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1104 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1105 
1106 	if (control->sframe) {
1107 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1108 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1109 		packed |= L2CAP_CTRL_FRAME_TYPE;
1110 	} else {
1111 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1112 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1113 	}
1114 
1115 	return packed;
1116 }
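
/* Worked example (illustrative only, assuming the shift and mask definitions
 * in l2cap.h): an RR S-frame with reqseq 5, poll set and final clear packs to
 * (5 << L2CAP_CTRL_REQSEQ_SHIFT) | L2CAP_CTRL_POLL | L2CAP_CTRL_FRAME_TYPE,
 * i.e. 0x0511 on the wire.
 */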
1117 
1118 static inline void __pack_control(struct l2cap_chan *chan,
1119 				  struct l2cap_ctrl *control,
1120 				  struct sk_buff *skb)
1121 {
1122 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1123 		put_unaligned_le32(__pack_extended_control(control),
1124 				   skb->data + L2CAP_HDR_SIZE);
1125 	} else {
1126 		put_unaligned_le16(__pack_enhanced_control(control),
1127 				   skb->data + L2CAP_HDR_SIZE);
1128 	}
1129 }
1130 
1131 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1132 {
1133 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1134 		return L2CAP_EXT_HDR_SIZE;
1135 	else
1136 		return L2CAP_ENH_HDR_SIZE;
1137 }
1138 
1139 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1140 					       u32 control)
1141 {
1142 	struct sk_buff *skb;
1143 	struct l2cap_hdr *lh;
1144 	int hlen = __ertm_hdr_size(chan);
1145 
1146 	if (chan->fcs == L2CAP_FCS_CRC16)
1147 		hlen += L2CAP_FCS_SIZE;
1148 
1149 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1150 
1151 	if (!skb)
1152 		return ERR_PTR(-ENOMEM);
1153 
1154 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1155 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1156 	lh->cid = cpu_to_le16(chan->dcid);
1157 
1158 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1159 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1160 	else
1161 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1162 
1163 	if (chan->fcs == L2CAP_FCS_CRC16) {
1164 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1165 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1166 	}
1167 
1168 	skb->priority = HCI_PRIO_MAX;
1169 	return skb;
1170 }
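
/* Resulting S-frame PDU layout (illustrative summary, not in the original
 * source): [len][dcid][control, 2 or 4 octets][FCS, 2 octets if CRC16], where
 * the basic header's len field counts only the control and optional FCS.
 */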
1171 
1172 static void l2cap_send_sframe(struct l2cap_chan *chan,
1173 			      struct l2cap_ctrl *control)
1174 {
1175 	struct sk_buff *skb;
1176 	u32 control_field;
1177 
1178 	BT_DBG("chan %p, control %p", chan, control);
1179 
1180 	if (!control->sframe)
1181 		return;
1182 
1183 	if (__chan_is_moving(chan))
1184 		return;
1185 
1186 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1187 	    !control->poll)
1188 		control->final = 1;
1189 
1190 	if (control->super == L2CAP_SUPER_RR)
1191 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1192 	else if (control->super == L2CAP_SUPER_RNR)
1193 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1194 
1195 	if (control->super != L2CAP_SUPER_SREJ) {
1196 		chan->last_acked_seq = control->reqseq;
1197 		__clear_ack_timer(chan);
1198 	}
1199 
1200 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1201 	       control->final, control->poll, control->super);
1202 
1203 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1204 		control_field = __pack_extended_control(control);
1205 	else
1206 		control_field = __pack_enhanced_control(control);
1207 
1208 	skb = l2cap_create_sframe_pdu(chan, control_field);
1209 	if (!IS_ERR(skb))
1210 		l2cap_do_send(chan, skb);
1211 }
1212 
1213 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1214 {
1215 	struct l2cap_ctrl control;
1216 
1217 	BT_DBG("chan %p, poll %d", chan, poll);
1218 
1219 	memset(&control, 0, sizeof(control));
1220 	control.sframe = 1;
1221 	control.poll = poll;
1222 
1223 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1224 		control.super = L2CAP_SUPER_RNR;
1225 	else
1226 		control.super = L2CAP_SUPER_RR;
1227 
1228 	control.reqseq = chan->buffer_seq;
1229 	l2cap_send_sframe(chan, &control);
1230 }
1231 
1232 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1233 {
1234 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1235 		return true;
1236 
1237 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1238 }
1239 
1240 static bool __amp_capable(struct l2cap_chan *chan)
1241 {
1242 	struct l2cap_conn *conn = chan->conn;
1243 	struct hci_dev *hdev;
1244 	bool amp_available = false;
1245 
1246 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1247 		return false;
1248 
1249 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1250 		return false;
1251 
1252 	read_lock(&hci_dev_list_lock);
1253 	list_for_each_entry(hdev, &hci_dev_list, list) {
1254 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1255 		    test_bit(HCI_UP, &hdev->flags)) {
1256 			amp_available = true;
1257 			break;
1258 		}
1259 	}
1260 	read_unlock(&hci_dev_list_lock);
1261 
1262 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1263 		return amp_available;
1264 
1265 	return false;
1266 }
1267 
1268 static bool l2cap_check_efs(struct l2cap_chan *chan)
1269 {
1270 	/* Check EFS parameters */
1271 	return true;
1272 }
1273 
1274 void l2cap_send_conn_req(struct l2cap_chan *chan)
1275 {
1276 	struct l2cap_conn *conn = chan->conn;
1277 	struct l2cap_conn_req req;
1278 
1279 	req.scid = cpu_to_le16(chan->scid);
1280 	req.psm  = chan->psm;
1281 
1282 	chan->ident = l2cap_get_ident(conn);
1283 
1284 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1285 
1286 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1287 }
1288 
1289 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1290 {
1291 	struct l2cap_create_chan_req req;
1292 	req.scid = cpu_to_le16(chan->scid);
1293 	req.psm  = chan->psm;
1294 	req.amp_id = amp_id;
1295 
1296 	chan->ident = l2cap_get_ident(chan->conn);
1297 
1298 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1299 		       sizeof(req), &req);
1300 }
1301 
1302 static void l2cap_move_setup(struct l2cap_chan *chan)
1303 {
1304 	struct sk_buff *skb;
1305 
1306 	BT_DBG("chan %p", chan);
1307 
1308 	if (chan->mode != L2CAP_MODE_ERTM)
1309 		return;
1310 
1311 	__clear_retrans_timer(chan);
1312 	__clear_monitor_timer(chan);
1313 	__clear_ack_timer(chan);
1314 
1315 	chan->retry_count = 0;
1316 	skb_queue_walk(&chan->tx_q, skb) {
1317 		if (bt_cb(skb)->l2cap.retries)
1318 			bt_cb(skb)->l2cap.retries = 1;
1319 		else
1320 			break;
1321 	}
1322 
1323 	chan->expected_tx_seq = chan->buffer_seq;
1324 
1325 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1326 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1327 	l2cap_seq_list_clear(&chan->retrans_list);
1328 	l2cap_seq_list_clear(&chan->srej_list);
1329 	skb_queue_purge(&chan->srej_q);
1330 
1331 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1332 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1333 
1334 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1335 }
1336 
1337 static void l2cap_move_done(struct l2cap_chan *chan)
1338 {
1339 	u8 move_role = chan->move_role;
1340 	BT_DBG("chan %p", chan);
1341 
1342 	chan->move_state = L2CAP_MOVE_STABLE;
1343 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1344 
1345 	if (chan->mode != L2CAP_MODE_ERTM)
1346 		return;
1347 
1348 	switch (move_role) {
1349 	case L2CAP_MOVE_ROLE_INITIATOR:
1350 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1351 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1352 		break;
1353 	case L2CAP_MOVE_ROLE_RESPONDER:
1354 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1355 		break;
1356 	}
1357 }
1358 
1359 static void l2cap_chan_ready(struct l2cap_chan *chan)
1360 {
1361 	/* The channel may have already been flagged as connected in
1362 	 * case of receiving data before the L2CAP info req/rsp
1363 	 * procedure is complete.
1364 	 */
1365 	if (chan->state == BT_CONNECTED)
1366 		return;
1367 
1368 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1369 	chan->conf_state = 0;
1370 	__clear_chan_timer(chan);
1371 
1372 	switch (chan->mode) {
1373 	case L2CAP_MODE_LE_FLOWCTL:
1374 	case L2CAP_MODE_EXT_FLOWCTL:
1375 		if (!chan->tx_credits)
1376 			chan->ops->suspend(chan);
1377 		break;
1378 	}
1379 
1380 	chan->state = BT_CONNECTED;
1381 
1382 	chan->ops->ready(chan);
1383 }
1384 
1385 static void l2cap_le_connect(struct l2cap_chan *chan)
1386 {
1387 	struct l2cap_conn *conn = chan->conn;
1388 	struct l2cap_le_conn_req req;
1389 
1390 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1391 		return;
1392 
1393 	if (!chan->imtu)
1394 		chan->imtu = chan->conn->mtu;
1395 
1396 	l2cap_le_flowctl_init(chan, 0);
1397 
1398 	memset(&req, 0, sizeof(req));
1399 	req.psm     = chan->psm;
1400 	req.scid    = cpu_to_le16(chan->scid);
1401 	req.mtu     = cpu_to_le16(chan->imtu);
1402 	req.mps     = cpu_to_le16(chan->mps);
1403 	req.credits = cpu_to_le16(chan->rx_credits);
1404 
1405 	chan->ident = l2cap_get_ident(conn);
1406 
1407 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1408 		       sizeof(req), &req);
1409 }
1410 
1411 struct l2cap_ecred_conn_data {
1412 	struct {
1413 		struct l2cap_ecred_conn_req req;
1414 		__le16 scid[5];
1415 	} __packed pdu;
1416 	struct l2cap_chan *chan;
1417 	struct pid *pid;
1418 	int count;
1419 };
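
/* Note (added for clarity, not in the original source): the five-element
 * scid[] array mirrors the protocol limit of at most five source CIDs in a
 * single enhanced credit based connection request.
 */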
1420 
1421 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1422 {
1423 	struct l2cap_ecred_conn_data *conn = data;
1424 	struct pid *pid;
1425 
1426 	if (chan == conn->chan)
1427 		return;
1428 
1429 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1430 		return;
1431 
1432 	pid = chan->ops->get_peer_pid(chan);
1433 
1434 	/* Only add deferred channels with the same PID/PSM */
1435 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1436 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1437 		return;
1438 
1439 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1440 		return;
1441 
1442 	l2cap_ecred_init(chan, 0);
1443 
1444 	/* Set the same ident so we can match on the rsp */
1445 	chan->ident = conn->chan->ident;
1446 
1447 	/* Include all channels deferred */
1448 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1449 
1450 	conn->count++;
1451 }
1452 
1453 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1454 {
1455 	struct l2cap_conn *conn = chan->conn;
1456 	struct l2cap_ecred_conn_data data;
1457 
1458 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1459 		return;
1460 
1461 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1462 		return;
1463 
1464 	l2cap_ecred_init(chan, 0);
1465 
1466 	memset(&data, 0, sizeof(data));
1467 	data.pdu.req.psm     = chan->psm;
1468 	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
1469 	data.pdu.req.mps     = cpu_to_le16(chan->mps);
1470 	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1471 	data.pdu.scid[0]     = cpu_to_le16(chan->scid);
1472 
1473 	chan->ident = l2cap_get_ident(conn);
1474 
1475 	data.count = 1;
1476 	data.chan = chan;
1477 	data.pid = chan->ops->get_peer_pid(chan);
1478 
1479 	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1480 
1481 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1482 		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
1483 		       &data.pdu);
1484 }
1485 
1486 static void l2cap_le_start(struct l2cap_chan *chan)
1487 {
1488 	struct l2cap_conn *conn = chan->conn;
1489 
1490 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1491 		return;
1492 
1493 	if (!chan->psm) {
1494 		l2cap_chan_ready(chan);
1495 		return;
1496 	}
1497 
1498 	if (chan->state == BT_CONNECT) {
1499 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1500 			l2cap_ecred_connect(chan);
1501 		else
1502 			l2cap_le_connect(chan);
1503 	}
1504 }
1505 
1506 static void l2cap_start_connection(struct l2cap_chan *chan)
1507 {
1508 	if (__amp_capable(chan)) {
1509 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1510 		a2mp_discover_amp(chan);
1511 	} else if (chan->conn->hcon->type == LE_LINK) {
1512 		l2cap_le_start(chan);
1513 	} else {
1514 		l2cap_send_conn_req(chan);
1515 	}
1516 }
1517 
1518 static void l2cap_request_info(struct l2cap_conn *conn)
1519 {
1520 	struct l2cap_info_req req;
1521 
1522 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1523 		return;
1524 
1525 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1526 
1527 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1528 	conn->info_ident = l2cap_get_ident(conn);
1529 
1530 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1531 
1532 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1533 		       sizeof(req), &req);
1534 }
1535 
1536 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1537 {
1538 	/* The minimum encryption key size needs to be enforced by the
1539 	 * host stack before establishing any L2CAP connections. The
1540 	 * specification in theory allows a minimum of 1, but to align
1541 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1542 	 *
1543 	 * This check might also be called for unencrypted connections
1544 	 * that have no key size requirements. Ensure that the link is
1545 	 * actually encrypted before enforcing a key size.
1546 	 */
1547 	int min_key_size = hcon->hdev->min_enc_key_size;
1548 
1549 	/* On FIPS security level, key size must be 16 bytes */
1550 	if (hcon->sec_level == BT_SECURITY_FIPS)
1551 		min_key_size = 16;
1552 
1553 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1554 		hcon->enc_key_size >= min_key_size);
1555 }
1556 
1557 static void l2cap_do_start(struct l2cap_chan *chan)
1558 {
1559 	struct l2cap_conn *conn = chan->conn;
1560 
1561 	if (conn->hcon->type == LE_LINK) {
1562 		l2cap_le_start(chan);
1563 		return;
1564 	}
1565 
1566 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1567 		l2cap_request_info(conn);
1568 		return;
1569 	}
1570 
1571 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1572 		return;
1573 
1574 	if (!l2cap_chan_check_security(chan, true) ||
1575 	    !__l2cap_no_conn_pending(chan))
1576 		return;
1577 
1578 	if (l2cap_check_enc_key_size(conn->hcon))
1579 		l2cap_start_connection(chan);
1580 	else
1581 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1582 }
1583 
1584 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1585 {
1586 	u32 local_feat_mask = l2cap_feat_mask;
1587 	if (!disable_ertm)
1588 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1589 
1590 	switch (mode) {
1591 	case L2CAP_MODE_ERTM:
1592 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1593 	case L2CAP_MODE_STREAMING:
1594 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1595 	default:
1596 		return 0x00;
1597 	}
1598 }
1599 
1600 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1601 {
1602 	struct l2cap_conn *conn = chan->conn;
1603 	struct l2cap_disconn_req req;
1604 
1605 	if (!conn)
1606 		return;
1607 
1608 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1609 		__clear_retrans_timer(chan);
1610 		__clear_monitor_timer(chan);
1611 		__clear_ack_timer(chan);
1612 	}
1613 
1614 	if (chan->scid == L2CAP_CID_A2MP) {
1615 		l2cap_state_change(chan, BT_DISCONN);
1616 		return;
1617 	}
1618 
1619 	req.dcid = cpu_to_le16(chan->dcid);
1620 	req.scid = cpu_to_le16(chan->scid);
1621 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1622 		       sizeof(req), &req);
1623 
1624 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1625 }
1626 
1627 /* ---- L2CAP connections ---- */
1628 static void l2cap_conn_start(struct l2cap_conn *conn)
1629 {
1630 	struct l2cap_chan *chan, *tmp;
1631 
1632 	BT_DBG("conn %p", conn);
1633 
1634 	mutex_lock(&conn->chan_lock);
1635 
1636 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1637 		l2cap_chan_lock(chan);
1638 
1639 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1640 			l2cap_chan_ready(chan);
1641 			l2cap_chan_unlock(chan);
1642 			continue;
1643 		}
1644 
1645 		if (chan->state == BT_CONNECT) {
1646 			if (!l2cap_chan_check_security(chan, true) ||
1647 			    !__l2cap_no_conn_pending(chan)) {
1648 				l2cap_chan_unlock(chan);
1649 				continue;
1650 			}
1651 
1652 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1653 			    && test_bit(CONF_STATE2_DEVICE,
1654 					&chan->conf_state)) {
1655 				l2cap_chan_close(chan, ECONNRESET);
1656 				l2cap_chan_unlock(chan);
1657 				continue;
1658 			}
1659 
1660 			if (l2cap_check_enc_key_size(conn->hcon))
1661 				l2cap_start_connection(chan);
1662 			else
1663 				l2cap_chan_close(chan, ECONNREFUSED);
1664 
1665 		} else if (chan->state == BT_CONNECT2) {
1666 			struct l2cap_conn_rsp rsp;
1667 			char buf[128];
1668 			rsp.scid = cpu_to_le16(chan->dcid);
1669 			rsp.dcid = cpu_to_le16(chan->scid);
1670 
1671 			if (l2cap_chan_check_security(chan, false)) {
1672 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1673 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1674 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1675 					chan->ops->defer(chan);
1676 
1677 				} else {
1678 					l2cap_state_change(chan, BT_CONFIG);
1679 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1680 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1681 				}
1682 			} else {
1683 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1684 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1685 			}
1686 
1687 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1688 				       sizeof(rsp), &rsp);
1689 
1690 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1691 			    rsp.result != L2CAP_CR_SUCCESS) {
1692 				l2cap_chan_unlock(chan);
1693 				continue;
1694 			}
1695 
1696 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1697 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1698 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1699 			chan->num_conf_req++;
1700 		}
1701 
1702 		l2cap_chan_unlock(chan);
1703 	}
1704 
1705 	mutex_unlock(&conn->chan_lock);
1706 }
1707 
1708 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1709 {
1710 	struct hci_conn *hcon = conn->hcon;
1711 	struct hci_dev *hdev = hcon->hdev;
1712 
1713 	BT_DBG("%s conn %p", hdev->name, conn);
1714 
1715 	/* For outgoing pairing which doesn't necessarily have an
1716 	 * associated socket (e.g. mgmt_pair_device).
1717 	 */
1718 	if (hcon->out)
1719 		smp_conn_security(hcon, hcon->pending_sec_level);
1720 
1721 	/* For LE peripheral connections, make sure the connection interval
1722 	 * is in the range of the minimum and maximum interval that has
1723 	 * been configured for this connection. If not, then trigger
1724 	 * the connection update procedure.
1725 	 */
1726 	if (hcon->role == HCI_ROLE_SLAVE &&
1727 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1728 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1729 		struct l2cap_conn_param_update_req req;
1730 
1731 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1732 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1733 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1734 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1735 
1736 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1737 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1738 	}
1739 }
1740 
1741 static void l2cap_conn_ready(struct l2cap_conn *conn)
1742 {
1743 	struct l2cap_chan *chan;
1744 	struct hci_conn *hcon = conn->hcon;
1745 
1746 	BT_DBG("conn %p", conn);
1747 
1748 	if (hcon->type == ACL_LINK)
1749 		l2cap_request_info(conn);
1750 
1751 	mutex_lock(&conn->chan_lock);
1752 
1753 	list_for_each_entry(chan, &conn->chan_l, list) {
1754 
1755 		l2cap_chan_lock(chan);
1756 
1757 		if (chan->scid == L2CAP_CID_A2MP) {
1758 			l2cap_chan_unlock(chan);
1759 			continue;
1760 		}
1761 
1762 		if (hcon->type == LE_LINK) {
1763 			l2cap_le_start(chan);
1764 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1765 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1766 				l2cap_chan_ready(chan);
1767 		} else if (chan->state == BT_CONNECT) {
1768 			l2cap_do_start(chan);
1769 		}
1770 
1771 		l2cap_chan_unlock(chan);
1772 	}
1773 
1774 	mutex_unlock(&conn->chan_lock);
1775 
1776 	if (hcon->type == LE_LINK)
1777 		l2cap_le_conn_ready(conn);
1778 
1779 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1780 }
1781 
1782 /* Notify sockets that we cannot guarantee reliability anymore */
1783 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1784 {
1785 	struct l2cap_chan *chan;
1786 
1787 	BT_DBG("conn %p", conn);
1788 
1789 	mutex_lock(&conn->chan_lock);
1790 
1791 	list_for_each_entry(chan, &conn->chan_l, list) {
1792 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1793 			l2cap_chan_set_err(chan, err);
1794 	}
1795 
1796 	mutex_unlock(&conn->chan_lock);
1797 }
1798 
1799 static void l2cap_info_timeout(struct work_struct *work)
1800 {
1801 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1802 					       info_timer.work);
1803 
1804 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1805 	conn->info_ident = 0;
1806 
1807 	l2cap_conn_start(conn);
1808 }
1809 
1810 /*
1811  * l2cap_user
1812  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1813  * callback is called during registration. The ->remove callback is called
1814  * during unregistration.
1815  * An l2cap_user object is unregistered either explicitly or when the
1816  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1817  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1818  * External modules must own a reference to the l2cap_conn object if they intend
1819  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1820  * any time if they don't.
1821  */
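/*
 * Illustrative sketch of a hypothetical external user (not part of this
 * file): the module supplies ->probe and ->remove callbacks and keeps a
 * connection reference while it may still call l2cap_unregister_user().
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.list   = LIST_HEAD_INIT(my_user.list),
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	l2cap_conn_get(conn);
 *	if (l2cap_register_user(conn, &my_user))
 *		l2cap_conn_put(conn);
 */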
1822 
1823 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1824 {
1825 	struct hci_dev *hdev = conn->hcon->hdev;
1826 	int ret;
1827 
1828 	/* We need to check whether l2cap_conn is registered. If it is not, we
1829 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1830 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1831 	 * relies on the parent hci_conn object to be locked. This itself relies
1832 	 * on the hci_dev object to be locked. So we must lock the hci device
1833 	 * here, too. */
1834 
1835 	hci_dev_lock(hdev);
1836 
1837 	if (!list_empty(&user->list)) {
1838 		ret = -EINVAL;
1839 		goto out_unlock;
1840 	}
1841 
1842 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1843 	if (!conn->hchan) {
1844 		ret = -ENODEV;
1845 		goto out_unlock;
1846 	}
1847 
1848 	ret = user->probe(conn, user);
1849 	if (ret)
1850 		goto out_unlock;
1851 
1852 	list_add(&user->list, &conn->users);
1853 	ret = 0;
1854 
1855 out_unlock:
1856 	hci_dev_unlock(hdev);
1857 	return ret;
1858 }
1859 EXPORT_SYMBOL(l2cap_register_user);
1860 
1861 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1862 {
1863 	struct hci_dev *hdev = conn->hcon->hdev;
1864 
1865 	hci_dev_lock(hdev);
1866 
1867 	if (list_empty(&user->list))
1868 		goto out_unlock;
1869 
1870 	list_del_init(&user->list);
1871 	user->remove(conn, user);
1872 
1873 out_unlock:
1874 	hci_dev_unlock(hdev);
1875 }
1876 EXPORT_SYMBOL(l2cap_unregister_user);
1877 
1878 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1879 {
1880 	struct l2cap_user *user;
1881 
1882 	while (!list_empty(&conn->users)) {
1883 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1884 		list_del_init(&user->list);
1885 		user->remove(conn, user);
1886 	}
1887 }
1888 
1889 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1890 {
1891 	struct l2cap_conn *conn = hcon->l2cap_data;
1892 	struct l2cap_chan *chan, *l;
1893 
1894 	if (!conn)
1895 		return;
1896 
1897 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1898 
1899 	kfree_skb(conn->rx_skb);
1900 
1901 	skb_queue_purge(&conn->pending_rx);
1902 
1903 	/* We cannot call flush_work(&conn->pending_rx_work) here since we
1904 	 * might block if we are running on a worker from the same workqueue
1905 	 * pending_rx_work is waiting on.
1906 	 */
1907 	if (work_pending(&conn->pending_rx_work))
1908 		cancel_work_sync(&conn->pending_rx_work);
1909 
1910 	cancel_delayed_work_sync(&conn->id_addr_timer);
1911 
1912 	l2cap_unregister_all_users(conn);
1913 
1914 	/* Force the connection to be immediately dropped */
1915 	hcon->disc_timeout = 0;
1916 
1917 	mutex_lock(&conn->chan_lock);
1918 
1919 	/* Kill channels */
1920 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1921 		l2cap_chan_hold(chan);
1922 		l2cap_chan_lock(chan);
1923 
1924 		l2cap_chan_del(chan, err);
1925 
1926 		chan->ops->close(chan);
1927 
1928 		l2cap_chan_unlock(chan);
1929 		l2cap_chan_put(chan);
1930 	}
1931 
1932 	mutex_unlock(&conn->chan_lock);
1933 
1934 	hci_chan_del(conn->hchan);
1935 
1936 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1937 		cancel_delayed_work_sync(&conn->info_timer);
1938 
1939 	hcon->l2cap_data = NULL;
1940 	conn->hchan = NULL;
1941 	l2cap_conn_put(conn);
1942 }
1943 
1944 static void l2cap_conn_free(struct kref *ref)
1945 {
1946 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1947 
1948 	hci_conn_put(conn->hcon);
1949 	kfree(conn);
1950 }
1951 
1952 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1953 {
1954 	kref_get(&conn->ref);
1955 	return conn;
1956 }
1957 EXPORT_SYMBOL(l2cap_conn_get);
1958 
1959 void l2cap_conn_put(struct l2cap_conn *conn)
1960 {
1961 	kref_put(&conn->ref, l2cap_conn_free);
1962 }
1963 EXPORT_SYMBOL(l2cap_conn_put);
1964 
1965 /* ---- Socket interface ---- */
1966 
1967 /* Find socket with psm and source / destination bdaddr.
1968  * Returns closest match (exact src/dst matches preferred over BDADDR_ANY bindings).
1969  */
1970 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1971 						   bdaddr_t *src,
1972 						   bdaddr_t *dst,
1973 						   u8 link_type)
1974 {
1975 	struct l2cap_chan *c, *tmp, *c1 = NULL;
1976 
1977 	read_lock(&chan_list_lock);
1978 
1979 	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
1980 		if (state && c->state != state)
1981 			continue;
1982 
1983 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1984 			continue;
1985 
1986 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1987 			continue;
1988 
1989 		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
1990 			int src_match, dst_match;
1991 			int src_any, dst_any;
1992 
1993 			/* Exact match. */
1994 			src_match = !bacmp(&c->src, src);
1995 			dst_match = !bacmp(&c->dst, dst);
1996 			if (src_match && dst_match) {
1997 				if (!l2cap_chan_hold_unless_zero(c))
1998 					continue;
1999 
2000 				read_unlock(&chan_list_lock);
2001 				return c;
2002 			}
2003 
2004 			/* Closest match */
2005 			src_any = !bacmp(&c->src, BDADDR_ANY);
2006 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
2007 			if ((src_match && dst_any) || (src_any && dst_match) ||
2008 			    (src_any && dst_any))
2009 				c1 = c;
2010 		}
2011 	}
2012 
2013 	if (c1)
2014 		c1 = l2cap_chan_hold_unless_zero(c1);
2015 
2016 	read_unlock(&chan_list_lock);
2017 
2018 	return c1;
2019 }
2020 
2021 static void l2cap_monitor_timeout(struct work_struct *work)
2022 {
2023 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2024 					       monitor_timer.work);
2025 
2026 	BT_DBG("chan %p", chan);
2027 
2028 	l2cap_chan_lock(chan);
2029 
2030 	if (!chan->conn) {
2031 		l2cap_chan_unlock(chan);
2032 		l2cap_chan_put(chan);
2033 		return;
2034 	}
2035 
2036 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2037 
2038 	l2cap_chan_unlock(chan);
2039 	l2cap_chan_put(chan);
2040 }
2041 
2042 static void l2cap_retrans_timeout(struct work_struct *work)
2043 {
2044 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2045 					       retrans_timer.work);
2046 
2047 	BT_DBG("chan %p", chan);
2048 
2049 	l2cap_chan_lock(chan);
2050 
2051 	if (!chan->conn) {
2052 		l2cap_chan_unlock(chan);
2053 		l2cap_chan_put(chan);
2054 		return;
2055 	}
2056 
2057 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2058 	l2cap_chan_unlock(chan);
2059 	l2cap_chan_put(chan);
2060 }
2061 
2062 static void l2cap_streaming_send(struct l2cap_chan *chan,
2063 				 struct sk_buff_head *skbs)
2064 {
2065 	struct sk_buff *skb;
2066 	struct l2cap_ctrl *control;
2067 
2068 	BT_DBG("chan %p, skbs %p", chan, skbs);
2069 
2070 	if (__chan_is_moving(chan))
2071 		return;
2072 
2073 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
2074 
2075 	while (!skb_queue_empty(&chan->tx_q)) {
2076 
2077 		skb = skb_dequeue(&chan->tx_q);
2078 
2079 		bt_cb(skb)->l2cap.retries = 1;
2080 		control = &bt_cb(skb)->l2cap;
2081 
2082 		control->reqseq = 0;
2083 		control->txseq = chan->next_tx_seq;
2084 
2085 		__pack_control(chan, control, skb);
2086 
2087 		if (chan->fcs == L2CAP_FCS_CRC16) {
2088 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2089 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2090 		}
2091 
2092 		l2cap_do_send(chan, skb);
2093 
2094 		BT_DBG("Sent txseq %u", control->txseq);
2095 
2096 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2097 		chan->frames_sent++;
2098 	}
2099 }
2100 
2101 static int l2cap_ertm_send(struct l2cap_chan *chan)
2102 {
2103 	struct sk_buff *skb, *tx_skb;
2104 	struct l2cap_ctrl *control;
2105 	int sent = 0;
2106 
2107 	BT_DBG("chan %p", chan);
2108 
2109 	if (chan->state != BT_CONNECTED)
2110 		return -ENOTCONN;
2111 
2112 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2113 		return 0;
2114 
2115 	if (__chan_is_moving(chan))
2116 		return 0;
2117 
2118 	while (chan->tx_send_head &&
2119 	       chan->unacked_frames < chan->remote_tx_win &&
2120 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
2121 
2122 		skb = chan->tx_send_head;
2123 
2124 		bt_cb(skb)->l2cap.retries = 1;
2125 		control = &bt_cb(skb)->l2cap;
2126 
2127 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2128 			control->final = 1;
2129 
2130 		control->reqseq = chan->buffer_seq;
2131 		chan->last_acked_seq = chan->buffer_seq;
2132 		control->txseq = chan->next_tx_seq;
2133 
2134 		__pack_control(chan, control, skb);
2135 
2136 		if (chan->fcs == L2CAP_FCS_CRC16) {
2137 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2138 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2139 		}
2140 
2141 		/* Clone after data has been modified. Data is assumed to be
2142 		   read-only (for locking purposes) on cloned sk_buffs.
2143 		 */
2144 		tx_skb = skb_clone(skb, GFP_KERNEL);
2145 
2146 		if (!tx_skb)
2147 			break;
2148 
2149 		__set_retrans_timer(chan);
2150 
2151 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2152 		chan->unacked_frames++;
2153 		chan->frames_sent++;
2154 		sent++;
2155 
2156 		if (skb_queue_is_last(&chan->tx_q, skb))
2157 			chan->tx_send_head = NULL;
2158 		else
2159 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2160 
2161 		l2cap_do_send(chan, tx_skb);
2162 		BT_DBG("Sent txseq %u", control->txseq);
2163 	}
2164 
2165 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2166 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
2167 
2168 	return sent;
2169 }
2170 
2171 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2172 {
2173 	struct l2cap_ctrl control;
2174 	struct sk_buff *skb;
2175 	struct sk_buff *tx_skb;
2176 	u16 seq;
2177 
2178 	BT_DBG("chan %p", chan);
2179 
2180 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2181 		return;
2182 
2183 	if (__chan_is_moving(chan))
2184 		return;
2185 
2186 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2187 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2188 
2189 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2190 		if (!skb) {
2191 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2192 			       seq);
2193 			continue;
2194 		}
2195 
2196 		bt_cb(skb)->l2cap.retries++;
2197 		control = bt_cb(skb)->l2cap;
2198 
2199 		if (chan->max_tx != 0 &&
2200 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
2201 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2202 			l2cap_send_disconn_req(chan, ECONNRESET);
2203 			l2cap_seq_list_clear(&chan->retrans_list);
2204 			break;
2205 		}
2206 
2207 		control.reqseq = chan->buffer_seq;
2208 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2209 			control.final = 1;
2210 		else
2211 			control.final = 0;
2212 
2213 		if (skb_cloned(skb)) {
2214 			/* Cloned sk_buffs are read-only, so we need a
2215 			 * writeable copy
2216 			 */
2217 			tx_skb = skb_copy(skb, GFP_KERNEL);
2218 		} else {
2219 			tx_skb = skb_clone(skb, GFP_KERNEL);
2220 		}
2221 
2222 		if (!tx_skb) {
2223 			l2cap_seq_list_clear(&chan->retrans_list);
2224 			break;
2225 		}
2226 
2227 		/* Update skb contents */
2228 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2229 			put_unaligned_le32(__pack_extended_control(&control),
2230 					   tx_skb->data + L2CAP_HDR_SIZE);
2231 		} else {
2232 			put_unaligned_le16(__pack_enhanced_control(&control),
2233 					   tx_skb->data + L2CAP_HDR_SIZE);
2234 		}
2235 
2236 		/* Update FCS */
2237 		if (chan->fcs == L2CAP_FCS_CRC16) {
2238 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2239 					tx_skb->len - L2CAP_FCS_SIZE);
2240 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2241 						L2CAP_FCS_SIZE);
2242 		}
2243 
2244 		l2cap_do_send(chan, tx_skb);
2245 
2246 		BT_DBG("Resent txseq %d", control.txseq);
2247 
2248 		chan->last_acked_seq = chan->buffer_seq;
2249 	}
2250 }
2251 
2252 static void l2cap_retransmit(struct l2cap_chan *chan,
2253 			     struct l2cap_ctrl *control)
2254 {
2255 	BT_DBG("chan %p, control %p", chan, control);
2256 
2257 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2258 	l2cap_ertm_resend(chan);
2259 }
2260 
2261 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2262 				 struct l2cap_ctrl *control)
2263 {
2264 	struct sk_buff *skb;
2265 
2266 	BT_DBG("chan %p, control %p", chan, control);
2267 
2268 	if (control->poll)
2269 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2270 
2271 	l2cap_seq_list_clear(&chan->retrans_list);
2272 
2273 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2274 		return;
2275 
2276 	if (chan->unacked_frames) {
2277 		skb_queue_walk(&chan->tx_q, skb) {
2278 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2279 			    skb == chan->tx_send_head)
2280 				break;
2281 		}
2282 
2283 		skb_queue_walk_from(&chan->tx_q, skb) {
2284 			if (skb == chan->tx_send_head)
2285 				break;
2286 
2287 			l2cap_seq_list_append(&chan->retrans_list,
2288 					      bt_cb(skb)->l2cap.txseq);
2289 		}
2290 
2291 		l2cap_ertm_resend(chan);
2292 	}
2293 }
2294 
2295 static void l2cap_send_ack(struct l2cap_chan *chan)
2296 {
2297 	struct l2cap_ctrl control;
2298 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2299 					 chan->last_acked_seq);
2300 	int threshold;
2301 
2302 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2303 	       chan, chan->last_acked_seq, chan->buffer_seq);
2304 
2305 	memset(&control, 0, sizeof(control));
2306 	control.sframe = 1;
2307 
2308 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2309 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2310 		__clear_ack_timer(chan);
2311 		control.super = L2CAP_SUPER_RNR;
2312 		control.reqseq = chan->buffer_seq;
2313 		l2cap_send_sframe(chan, &control);
2314 	} else {
2315 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2316 			l2cap_ertm_send(chan);
2317 			/* If any i-frames were sent, they included an ack */
2318 			if (chan->buffer_seq == chan->last_acked_seq)
2319 				frames_to_ack = 0;
2320 		}
2321 
2322 		/* Ack now if the window is 3/4ths full.
2323 		 * Calculate without mul or div
2324 		 */
2325 		threshold = chan->ack_win;
2326 		threshold += threshold << 1;
2327 		threshold >>= 2;
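		/* threshold is now (ack_win + 2 * ack_win) >> 2, i.e. roughly
		 * 3/4 of the window: e.g. ack_win = 63 gives
		 * (63 + 126) >> 2 = 47 frames.
		 */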
2328 
2329 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2330 		       threshold);
2331 
2332 		if (frames_to_ack >= threshold) {
2333 			__clear_ack_timer(chan);
2334 			control.super = L2CAP_SUPER_RR;
2335 			control.reqseq = chan->buffer_seq;
2336 			l2cap_send_sframe(chan, &control);
2337 			frames_to_ack = 0;
2338 		}
2339 
2340 		if (frames_to_ack)
2341 			__set_ack_timer(chan);
2342 	}
2343 }
2344 
2345 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2346 					 struct msghdr *msg, int len,
2347 					 int count, struct sk_buff *skb)
2348 {
2349 	struct l2cap_conn *conn = chan->conn;
2350 	struct sk_buff **frag;
2351 	int sent = 0;
2352 
2353 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2354 		return -EFAULT;
2355 
2356 	sent += count;
2357 	len  -= count;
2358 
2359 	/* Continuation fragments (no L2CAP header) */
2360 	frag = &skb_shinfo(skb)->frag_list;
2361 	while (len) {
2362 		struct sk_buff *tmp;
2363 
2364 		count = min_t(unsigned int, conn->mtu, len);
2365 
2366 		tmp = chan->ops->alloc_skb(chan, 0, count,
2367 					   msg->msg_flags & MSG_DONTWAIT);
2368 		if (IS_ERR(tmp))
2369 			return PTR_ERR(tmp);
2370 
2371 		*frag = tmp;
2372 
2373 		if (!copy_from_iter_full(skb_put(*frag, count), count,
2374 				   &msg->msg_iter))
2375 			return -EFAULT;
2376 
2377 		sent += count;
2378 		len  -= count;
2379 
2380 		skb->len += (*frag)->len;
2381 		skb->data_len += (*frag)->len;
2382 
2383 		frag = &(*frag)->next;
2384 	}
2385 
2386 	return sent;
2387 }
2388 
2389 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2390 						 struct msghdr *msg, size_t len)
2391 {
2392 	struct l2cap_conn *conn = chan->conn;
2393 	struct sk_buff *skb;
2394 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2395 	struct l2cap_hdr *lh;
2396 
2397 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2398 	       __le16_to_cpu(chan->psm), len);
2399 
2400 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2401 
2402 	skb = chan->ops->alloc_skb(chan, hlen, count,
2403 				   msg->msg_flags & MSG_DONTWAIT);
2404 	if (IS_ERR(skb))
2405 		return skb;
2406 
2407 	/* Create L2CAP header */
2408 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2409 	lh->cid = cpu_to_le16(chan->dcid);
2410 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2411 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2412 
2413 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2414 	if (unlikely(err < 0)) {
2415 		kfree_skb(skb);
2416 		return ERR_PTR(err);
2417 	}
2418 	return skb;
2419 }
2420 
2421 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2422 					      struct msghdr *msg, size_t len)
2423 {
2424 	struct l2cap_conn *conn = chan->conn;
2425 	struct sk_buff *skb;
2426 	int err, count;
2427 	struct l2cap_hdr *lh;
2428 
2429 	BT_DBG("chan %p len %zu", chan, len);
2430 
2431 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2432 
2433 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2434 				   msg->msg_flags & MSG_DONTWAIT);
2435 	if (IS_ERR(skb))
2436 		return skb;
2437 
2438 	/* Create L2CAP header */
2439 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2440 	lh->cid = cpu_to_le16(chan->dcid);
2441 	lh->len = cpu_to_le16(len);
2442 
2443 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2444 	if (unlikely(err < 0)) {
2445 		kfree_skb(skb);
2446 		return ERR_PTR(err);
2447 	}
2448 	return skb;
2449 }
2450 
2451 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2452 					       struct msghdr *msg, size_t len,
2453 					       u16 sdulen)
2454 {
2455 	struct l2cap_conn *conn = chan->conn;
2456 	struct sk_buff *skb;
2457 	int err, count, hlen;
2458 	struct l2cap_hdr *lh;
2459 
2460 	BT_DBG("chan %p len %zu", chan, len);
2461 
2462 	if (!conn)
2463 		return ERR_PTR(-ENOTCONN);
2464 
2465 	hlen = __ertm_hdr_size(chan);
2466 
2467 	if (sdulen)
2468 		hlen += L2CAP_SDULEN_SIZE;
2469 
2470 	if (chan->fcs == L2CAP_FCS_CRC16)
2471 		hlen += L2CAP_FCS_SIZE;
2472 
2473 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2474 
2475 	skb = chan->ops->alloc_skb(chan, hlen, count,
2476 				   msg->msg_flags & MSG_DONTWAIT);
2477 	if (IS_ERR(skb))
2478 		return skb;
2479 
2480 	/* Create L2CAP header */
2481 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2482 	lh->cid = cpu_to_le16(chan->dcid);
2483 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2484 
2485 	/* Control header is populated later */
2486 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2487 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2488 	else
2489 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2490 
2491 	if (sdulen)
2492 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2493 
2494 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2495 	if (unlikely(err < 0)) {
2496 		kfree_skb(skb);
2497 		return ERR_PTR(err);
2498 	}
2499 
2500 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2501 	bt_cb(skb)->l2cap.retries = 0;
2502 	return skb;
2503 }
2504 
2505 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2506 			     struct sk_buff_head *seg_queue,
2507 			     struct msghdr *msg, size_t len)
2508 {
2509 	struct sk_buff *skb;
2510 	u16 sdu_len;
2511 	size_t pdu_len;
2512 	u8 sar;
2513 
2514 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2515 
2516 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2517 	 * so fragmented skbs are not used.  The HCI layer's handling
2518 	 * of fragmented skbs is not compatible with ERTM's queueing.
2519 	 */
2520 
2521 	/* PDU size is derived from the HCI MTU */
2522 	pdu_len = chan->conn->mtu;
2523 
2524 	/* Constrain PDU size for BR/EDR connections */
2525 	if (!chan->hs_hcon)
2526 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2527 
2528 	/* Adjust for largest possible L2CAP overhead. */
2529 	if (chan->fcs)
2530 		pdu_len -= L2CAP_FCS_SIZE;
2531 
2532 	pdu_len -= __ertm_hdr_size(chan);
2533 
2534 	/* Remote device may have requested smaller PDUs */
2535 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
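	/* Illustrative example (assuming enhanced control and CRC16 FCS):
	 * an HCI MTU of 1021 on a plain BR/EDR link is first capped to
	 * L2CAP_BREDR_MAX_PAYLOAD (1019), then loses 2 FCS bytes and the
	 * 6-byte basic+control header, leaving 1011 before the remote MPS
	 * cap is applied.
	 */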
2536 
2537 	if (len <= pdu_len) {
2538 		sar = L2CAP_SAR_UNSEGMENTED;
2539 		sdu_len = 0;
2540 		pdu_len = len;
2541 	} else {
2542 		sar = L2CAP_SAR_START;
2543 		sdu_len = len;
2544 	}
2545 
2546 	while (len > 0) {
2547 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2548 
2549 		if (IS_ERR(skb)) {
2550 			__skb_queue_purge(seg_queue);
2551 			return PTR_ERR(skb);
2552 		}
2553 
2554 		bt_cb(skb)->l2cap.sar = sar;
2555 		__skb_queue_tail(seg_queue, skb);
2556 
2557 		len -= pdu_len;
2558 		if (sdu_len)
2559 			sdu_len = 0;
2560 
2561 		if (len <= pdu_len) {
2562 			sar = L2CAP_SAR_END;
2563 			pdu_len = len;
2564 		} else {
2565 			sar = L2CAP_SAR_CONTINUE;
2566 		}
2567 	}
2568 
2569 	return 0;
2570 }
2571 
2572 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2573 						   struct msghdr *msg,
2574 						   size_t len, u16 sdulen)
2575 {
2576 	struct l2cap_conn *conn = chan->conn;
2577 	struct sk_buff *skb;
2578 	int err, count, hlen;
2579 	struct l2cap_hdr *lh;
2580 
2581 	BT_DBG("chan %p len %zu", chan, len);
2582 
2583 	if (!conn)
2584 		return ERR_PTR(-ENOTCONN);
2585 
2586 	hlen = L2CAP_HDR_SIZE;
2587 
2588 	if (sdulen)
2589 		hlen += L2CAP_SDULEN_SIZE;
2590 
2591 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2592 
2593 	skb = chan->ops->alloc_skb(chan, hlen, count,
2594 				   msg->msg_flags & MSG_DONTWAIT);
2595 	if (IS_ERR(skb))
2596 		return skb;
2597 
2598 	/* Create L2CAP header */
2599 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2600 	lh->cid = cpu_to_le16(chan->dcid);
2601 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2602 
2603 	if (sdulen)
2604 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2605 
2606 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2607 	if (unlikely(err < 0)) {
2608 		kfree_skb(skb);
2609 		return ERR_PTR(err);
2610 	}
2611 
2612 	return skb;
2613 }
2614 
2615 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2616 				struct sk_buff_head *seg_queue,
2617 				struct msghdr *msg, size_t len)
2618 {
2619 	struct sk_buff *skb;
2620 	size_t pdu_len;
2621 	u16 sdu_len;
2622 
2623 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2624 
2625 	sdu_len = len;
2626 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
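	/* Only the first PDU carries the 2-byte SDU length, so e.g. with a
	 * remote MPS of 100 a 250-byte SDU is split into payloads of 98,
	 * 100 and 52 bytes.
	 */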
2627 
2628 	while (len > 0) {
2629 		if (len <= pdu_len)
2630 			pdu_len = len;
2631 
2632 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2633 		if (IS_ERR(skb)) {
2634 			__skb_queue_purge(seg_queue);
2635 			return PTR_ERR(skb);
2636 		}
2637 
2638 		__skb_queue_tail(seg_queue, skb);
2639 
2640 		len -= pdu_len;
2641 
2642 		if (sdu_len) {
2643 			sdu_len = 0;
2644 			pdu_len += L2CAP_SDULEN_SIZE;
2645 		}
2646 	}
2647 
2648 	return 0;
2649 }
2650 
2651 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2652 {
2653 	int sent = 0;
2654 
2655 	BT_DBG("chan %p", chan);
2656 
2657 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2658 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2659 		chan->tx_credits--;
2660 		sent++;
2661 	}
2662 
2663 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2664 	       skb_queue_len(&chan->tx_q));
2665 }
2666 
2667 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2668 {
2669 	struct sk_buff *skb;
2670 	int err;
2671 	struct sk_buff_head seg_queue;
2672 
2673 	if (!chan->conn)
2674 		return -ENOTCONN;
2675 
2676 	/* Connectionless channel */
2677 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2678 		skb = l2cap_create_connless_pdu(chan, msg, len);
2679 		if (IS_ERR(skb))
2680 			return PTR_ERR(skb);
2681 
2682 		l2cap_do_send(chan, skb);
2683 		return len;
2684 	}
2685 
2686 	switch (chan->mode) {
2687 	case L2CAP_MODE_LE_FLOWCTL:
2688 	case L2CAP_MODE_EXT_FLOWCTL:
2689 		/* Check outgoing MTU */
2690 		if (len > chan->omtu)
2691 			return -EMSGSIZE;
2692 
2693 		__skb_queue_head_init(&seg_queue);
2694 
2695 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2696 
2697 		if (chan->state != BT_CONNECTED) {
2698 			__skb_queue_purge(&seg_queue);
2699 			err = -ENOTCONN;
2700 		}
2701 
2702 		if (err)
2703 			return err;
2704 
2705 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2706 
2707 		l2cap_le_flowctl_send(chan);
2708 
2709 		if (!chan->tx_credits)
2710 			chan->ops->suspend(chan);
2711 
2712 		err = len;
2713 
2714 		break;
2715 
2716 	case L2CAP_MODE_BASIC:
2717 		/* Check outgoing MTU */
2718 		if (len > chan->omtu)
2719 			return -EMSGSIZE;
2720 
2721 		/* Create a basic PDU */
2722 		skb = l2cap_create_basic_pdu(chan, msg, len);
2723 		if (IS_ERR(skb))
2724 			return PTR_ERR(skb);
2725 
2726 		l2cap_do_send(chan, skb);
2727 		err = len;
2728 		break;
2729 
2730 	case L2CAP_MODE_ERTM:
2731 	case L2CAP_MODE_STREAMING:
2732 		/* Check outgoing MTU */
2733 		if (len > chan->omtu) {
2734 			err = -EMSGSIZE;
2735 			break;
2736 		}
2737 
2738 		__skb_queue_head_init(&seg_queue);
2739 
2740 		/* Do segmentation before calling in to the state machine,
2741 		 * since it's possible to block while waiting for memory
2742 		 * allocation.
2743 		 */
2744 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2745 
2746 		if (err)
2747 			break;
2748 
2749 		if (chan->mode == L2CAP_MODE_ERTM)
2750 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2751 		else
2752 			l2cap_streaming_send(chan, &seg_queue);
2753 
2754 		err = len;
2755 
2756 		/* If the skbs were not queued for sending, they'll still be in
2757 		 * seg_queue and need to be purged.
2758 		 */
2759 		__skb_queue_purge(&seg_queue);
2760 		break;
2761 
2762 	default:
2763 		BT_DBG("bad state %1.1x", chan->mode);
2764 		err = -EBADFD;
2765 	}
2766 
2767 	return err;
2768 }
2769 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2770 
2771 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2772 {
2773 	struct l2cap_ctrl control;
2774 	u16 seq;
2775 
2776 	BT_DBG("chan %p, txseq %u", chan, txseq);
2777 
2778 	memset(&control, 0, sizeof(control));
2779 	control.sframe = 1;
2780 	control.super = L2CAP_SUPER_SREJ;
2781 
2782 	for (seq = chan->expected_tx_seq; seq != txseq;
2783 	     seq = __next_seq(chan, seq)) {
2784 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2785 			control.reqseq = seq;
2786 			l2cap_send_sframe(chan, &control);
2787 			l2cap_seq_list_append(&chan->srej_list, seq);
2788 		}
2789 	}
2790 
2791 	chan->expected_tx_seq = __next_seq(chan, txseq);
2792 }
2793 
2794 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2795 {
2796 	struct l2cap_ctrl control;
2797 
2798 	BT_DBG("chan %p", chan);
2799 
2800 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2801 		return;
2802 
2803 	memset(&control, 0, sizeof(control));
2804 	control.sframe = 1;
2805 	control.super = L2CAP_SUPER_SREJ;
2806 	control.reqseq = chan->srej_list.tail;
2807 	l2cap_send_sframe(chan, &control);
2808 }
2809 
2810 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2811 {
2812 	struct l2cap_ctrl control;
2813 	u16 initial_head;
2814 	u16 seq;
2815 
2816 	BT_DBG("chan %p, txseq %u", chan, txseq);
2817 
2818 	memset(&control, 0, sizeof(control));
2819 	control.sframe = 1;
2820 	control.super = L2CAP_SUPER_SREJ;
2821 
2822 	/* Capture initial list head to allow only one pass through the list. */
2823 	initial_head = chan->srej_list.head;
2824 
2825 	do {
2826 		seq = l2cap_seq_list_pop(&chan->srej_list);
2827 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2828 			break;
2829 
2830 		control.reqseq = seq;
2831 		l2cap_send_sframe(chan, &control);
2832 		l2cap_seq_list_append(&chan->srej_list, seq);
2833 	} while (chan->srej_list.head != initial_head);
2834 }
2835 
2836 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2837 {
2838 	struct sk_buff *acked_skb;
2839 	u16 ackseq;
2840 
2841 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2842 
2843 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2844 		return;
2845 
2846 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2847 	       chan->expected_ack_seq, chan->unacked_frames);
2848 
2849 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2850 	     ackseq = __next_seq(chan, ackseq)) {
2851 
2852 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2853 		if (acked_skb) {
2854 			skb_unlink(acked_skb, &chan->tx_q);
2855 			kfree_skb(acked_skb);
2856 			chan->unacked_frames--;
2857 		}
2858 	}
2859 
2860 	chan->expected_ack_seq = reqseq;
2861 
2862 	if (chan->unacked_frames == 0)
2863 		__clear_retrans_timer(chan);
2864 
2865 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2866 }
2867 
2868 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2869 {
2870 	BT_DBG("chan %p", chan);
2871 
2872 	chan->expected_tx_seq = chan->buffer_seq;
2873 	l2cap_seq_list_clear(&chan->srej_list);
2874 	skb_queue_purge(&chan->srej_q);
2875 	chan->rx_state = L2CAP_RX_STATE_RECV;
2876 }
2877 
2878 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2879 				struct l2cap_ctrl *control,
2880 				struct sk_buff_head *skbs, u8 event)
2881 {
2882 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2883 	       event);
2884 
2885 	switch (event) {
2886 	case L2CAP_EV_DATA_REQUEST:
2887 		if (chan->tx_send_head == NULL)
2888 			chan->tx_send_head = skb_peek(skbs);
2889 
2890 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2891 		l2cap_ertm_send(chan);
2892 		break;
2893 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2894 		BT_DBG("Enter LOCAL_BUSY");
2895 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2896 
2897 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2898 			/* The SREJ_SENT state must be aborted if we are to
2899 			 * enter the LOCAL_BUSY state.
2900 			 */
2901 			l2cap_abort_rx_srej_sent(chan);
2902 		}
2903 
2904 		l2cap_send_ack(chan);
2905 
2906 		break;
2907 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2908 		BT_DBG("Exit LOCAL_BUSY");
2909 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2910 
2911 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2912 			struct l2cap_ctrl local_control;
2913 
2914 			memset(&local_control, 0, sizeof(local_control));
2915 			local_control.sframe = 1;
2916 			local_control.super = L2CAP_SUPER_RR;
2917 			local_control.poll = 1;
2918 			local_control.reqseq = chan->buffer_seq;
2919 			l2cap_send_sframe(chan, &local_control);
2920 
2921 			chan->retry_count = 1;
2922 			__set_monitor_timer(chan);
2923 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2924 		}
2925 		break;
2926 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2927 		l2cap_process_reqseq(chan, control->reqseq);
2928 		break;
2929 	case L2CAP_EV_EXPLICIT_POLL:
2930 		l2cap_send_rr_or_rnr(chan, 1);
2931 		chan->retry_count = 1;
2932 		__set_monitor_timer(chan);
2933 		__clear_ack_timer(chan);
2934 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2935 		break;
2936 	case L2CAP_EV_RETRANS_TO:
2937 		l2cap_send_rr_or_rnr(chan, 1);
2938 		chan->retry_count = 1;
2939 		__set_monitor_timer(chan);
2940 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2941 		break;
2942 	case L2CAP_EV_RECV_FBIT:
2943 		/* Nothing to process */
2944 		break;
2945 	default:
2946 		break;
2947 	}
2948 }
2949 
2950 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2951 				  struct l2cap_ctrl *control,
2952 				  struct sk_buff_head *skbs, u8 event)
2953 {
2954 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2955 	       event);
2956 
2957 	switch (event) {
2958 	case L2CAP_EV_DATA_REQUEST:
2959 		if (chan->tx_send_head == NULL)
2960 			chan->tx_send_head = skb_peek(skbs);
2961 		/* Queue data, but don't send. */
2962 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2963 		break;
2964 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2965 		BT_DBG("Enter LOCAL_BUSY");
2966 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2967 
2968 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2969 			/* The SREJ_SENT state must be aborted if we are to
2970 			 * enter the LOCAL_BUSY state.
2971 			 */
2972 			l2cap_abort_rx_srej_sent(chan);
2973 		}
2974 
2975 		l2cap_send_ack(chan);
2976 
2977 		break;
2978 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2979 		BT_DBG("Exit LOCAL_BUSY");
2980 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2981 
2982 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2983 			struct l2cap_ctrl local_control;
2984 			memset(&local_control, 0, sizeof(local_control));
2985 			local_control.sframe = 1;
2986 			local_control.super = L2CAP_SUPER_RR;
2987 			local_control.poll = 1;
2988 			local_control.reqseq = chan->buffer_seq;
2989 			l2cap_send_sframe(chan, &local_control);
2990 
2991 			chan->retry_count = 1;
2992 			__set_monitor_timer(chan);
2993 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2994 		}
2995 		break;
2996 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2997 		l2cap_process_reqseq(chan, control->reqseq);
2998 		fallthrough;
2999 
3000 	case L2CAP_EV_RECV_FBIT:
3001 		if (control && control->final) {
3002 			__clear_monitor_timer(chan);
3003 			if (chan->unacked_frames > 0)
3004 				__set_retrans_timer(chan);
3005 			chan->retry_count = 0;
3006 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3007 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
3008 		}
3009 		break;
3010 	case L2CAP_EV_EXPLICIT_POLL:
3011 		/* Ignore */
3012 		break;
3013 	case L2CAP_EV_MONITOR_TO:
3014 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3015 			l2cap_send_rr_or_rnr(chan, 1);
3016 			__set_monitor_timer(chan);
3017 			chan->retry_count++;
3018 		} else {
3019 			l2cap_send_disconn_req(chan, ECONNABORTED);
3020 		}
3021 		break;
3022 	default:
3023 		break;
3024 	}
3025 }
3026 
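/* ERTM transmit state machine entry point.  In L2CAP_TX_STATE_XMIT new
 * I-frames are sent while the remote window allows; a retransmission
 * timeout or an explicit poll moves the channel to L2CAP_TX_STATE_WAIT_F,
 * where outgoing data is only queued until a frame carrying the Final bit
 * (or a monitor timeout) is processed.
 */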
3027 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3028 		     struct sk_buff_head *skbs, u8 event)
3029 {
3030 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3031 	       chan, control, skbs, event, chan->tx_state);
3032 
3033 	switch (chan->tx_state) {
3034 	case L2CAP_TX_STATE_XMIT:
3035 		l2cap_tx_state_xmit(chan, control, skbs, event);
3036 		break;
3037 	case L2CAP_TX_STATE_WAIT_F:
3038 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3039 		break;
3040 	default:
3041 		/* Ignore event */
3042 		break;
3043 	}
3044 }
3045 
3046 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3047 			     struct l2cap_ctrl *control)
3048 {
3049 	BT_DBG("chan %p, control %p", chan, control);
3050 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3051 }
3052 
3053 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3054 				  struct l2cap_ctrl *control)
3055 {
3056 	BT_DBG("chan %p, control %p", chan, control);
3057 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3058 }
3059 
3060 /* Copy frame to all raw sockets on that connection */
3061 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3062 {
3063 	struct sk_buff *nskb;
3064 	struct l2cap_chan *chan;
3065 
3066 	BT_DBG("conn %p", conn);
3067 
3068 	mutex_lock(&conn->chan_lock);
3069 
3070 	list_for_each_entry(chan, &conn->chan_l, list) {
3071 		if (chan->chan_type != L2CAP_CHAN_RAW)
3072 			continue;
3073 
3074 		/* Don't send frame to the channel it came from */
3075 		if (bt_cb(skb)->l2cap.chan == chan)
3076 			continue;
3077 
3078 		nskb = skb_clone(skb, GFP_KERNEL);
3079 		if (!nskb)
3080 			continue;
3081 		if (chan->ops->recv(chan, nskb))
3082 			kfree_skb(nskb);
3083 	}
3084 
3085 	mutex_unlock(&conn->chan_lock);
3086 }
3087 
3088 /* ---- L2CAP signalling commands ---- */
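/* A signalling PDU is a basic L2CAP header (length, signalling CID)
 * followed by a 4-byte command header (code, ident, length) and the
 * command payload.  Worked example: the Connection Parameter Update
 * Request built in l2cap_le_conn_ready() carries an 8-byte payload,
 * so lh->len is 4 + 8 = 12 and cmd->len is 8.
 */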
3089 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3090 				       u8 ident, u16 dlen, void *data)
3091 {
3092 	struct sk_buff *skb, **frag;
3093 	struct l2cap_cmd_hdr *cmd;
3094 	struct l2cap_hdr *lh;
3095 	int len, count;
3096 
3097 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3098 	       conn, code, ident, dlen);
3099 
3100 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3101 		return NULL;
3102 
3103 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3104 	count = min_t(unsigned int, conn->mtu, len);
3105 
3106 	skb = bt_skb_alloc(count, GFP_KERNEL);
3107 	if (!skb)
3108 		return NULL;
3109 
3110 	lh = skb_put(skb, L2CAP_HDR_SIZE);
3111 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3112 
3113 	if (conn->hcon->type == LE_LINK)
3114 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3115 	else
3116 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3117 
3118 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3119 	cmd->code  = code;
3120 	cmd->ident = ident;
3121 	cmd->len   = cpu_to_le16(dlen);
3122 
3123 	if (dlen) {
3124 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3125 		skb_put_data(skb, data, count);
3126 		data += count;
3127 	}
3128 
3129 	len -= skb->len;
3130 
3131 	/* Continuation fragments (no L2CAP header) */
3132 	frag = &skb_shinfo(skb)->frag_list;
3133 	while (len) {
3134 		count = min_t(unsigned int, conn->mtu, len);
3135 
3136 		*frag = bt_skb_alloc(count, GFP_KERNEL);
3137 		if (!*frag)
3138 			goto fail;
3139 
3140 		skb_put_data(*frag, data, count);
3141 
3142 		len  -= count;
3143 		data += count;
3144 
3145 		frag = &(*frag)->next;
3146 	}
3147 
3148 	return skb;
3149 
3150 fail:
3151 	kfree_skb(skb);
3152 	return NULL;
3153 }
3154 
3155 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3156 				     unsigned long *val)
3157 {
3158 	struct l2cap_conf_opt *opt = *ptr;
3159 	int len;
3160 
3161 	len = L2CAP_CONF_OPT_SIZE + opt->len;
3162 	*ptr += len;
3163 
3164 	*type = opt->type;
3165 	*olen = opt->len;
3166 
3167 	switch (opt->len) {
3168 	case 1:
3169 		*val = *((u8 *) opt->val);
3170 		break;
3171 
3172 	case 2:
3173 		*val = get_unaligned_le16(opt->val);
3174 		break;
3175 
3176 	case 4:
3177 		*val = get_unaligned_le32(opt->val);
3178 		break;
3179 
3180 	default:
3181 		*val = (unsigned long) opt->val;
3182 		break;
3183 	}
3184 
3185 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3186 	return len;
3187 }
3188 
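/* Configuration options use a type/length/value encoding.  For example,
 * an MTU option carrying the value 672 is emitted as the four bytes
 * 0x01 0x02 0xa0 0x02 (type L2CAP_CONF_MTU, length 2, little-endian
 * value).
 */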
3189 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3190 {
3191 	struct l2cap_conf_opt *opt = *ptr;
3192 
3193 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3194 
3195 	if (size < L2CAP_CONF_OPT_SIZE + len)
3196 		return;
3197 
3198 	opt->type = type;
3199 	opt->len  = len;
3200 
3201 	switch (len) {
3202 	case 1:
3203 		*((u8 *) opt->val)  = val;
3204 		break;
3205 
3206 	case 2:
3207 		put_unaligned_le16(val, opt->val);
3208 		break;
3209 
3210 	case 4:
3211 		put_unaligned_le32(val, opt->val);
3212 		break;
3213 
3214 	default:
3215 		memcpy(opt->val, (void *) val, len);
3216 		break;
3217 	}
3218 
3219 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3220 }
3221 
3222 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3223 {
3224 	struct l2cap_conf_efs efs;
3225 
3226 	switch (chan->mode) {
3227 	case L2CAP_MODE_ERTM:
3228 		efs.id		= chan->local_id;
3229 		efs.stype	= chan->local_stype;
3230 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3231 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3232 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3233 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3234 		break;
3235 
3236 	case L2CAP_MODE_STREAMING:
3237 		efs.id		= 1;
3238 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3239 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3240 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3241 		efs.acc_lat	= 0;
3242 		efs.flush_to	= 0;
3243 		break;
3244 
3245 	default:
3246 		return;
3247 	}
3248 
3249 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3250 			   (unsigned long) &efs, size);
3251 }
3252 
3253 static void l2cap_ack_timeout(struct work_struct *work)
3254 {
3255 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3256 					       ack_timer.work);
3257 	u16 frames_to_ack;
3258 
3259 	BT_DBG("chan %p", chan);
3260 
3261 	l2cap_chan_lock(chan);
3262 
3263 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3264 				     chan->last_acked_seq);
3265 
3266 	if (frames_to_ack)
3267 		l2cap_send_rr_or_rnr(chan, 0);
3268 
3269 	l2cap_chan_unlock(chan);
3270 	l2cap_chan_put(chan);
3271 }
3272 
3273 int l2cap_ertm_init(struct l2cap_chan *chan)
3274 {
3275 	int err;
3276 
3277 	chan->next_tx_seq = 0;
3278 	chan->expected_tx_seq = 0;
3279 	chan->expected_ack_seq = 0;
3280 	chan->unacked_frames = 0;
3281 	chan->buffer_seq = 0;
3282 	chan->frames_sent = 0;
3283 	chan->last_acked_seq = 0;
3284 	chan->sdu = NULL;
3285 	chan->sdu_last_frag = NULL;
3286 	chan->sdu_len = 0;
3287 
3288 	skb_queue_head_init(&chan->tx_q);
3289 
3290 	chan->local_amp_id = AMP_ID_BREDR;
3291 	chan->move_id = AMP_ID_BREDR;
3292 	chan->move_state = L2CAP_MOVE_STABLE;
3293 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3294 
3295 	if (chan->mode != L2CAP_MODE_ERTM)
3296 		return 0;
3297 
3298 	chan->rx_state = L2CAP_RX_STATE_RECV;
3299 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3300 
3301 	skb_queue_head_init(&chan->srej_q);
3302 
3303 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3304 	if (err < 0)
3305 		return err;
3306 
3307 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3308 	if (err < 0)
3309 		l2cap_seq_list_free(&chan->srej_list);
3310 
3311 	return err;
3312 }
3313 
3314 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3315 {
3316 	switch (mode) {
3317 	case L2CAP_MODE_STREAMING:
3318 	case L2CAP_MODE_ERTM:
3319 		if (l2cap_mode_supported(mode, remote_feat_mask))
3320 			return mode;
3321 		fallthrough;
3322 	default:
3323 		return L2CAP_MODE_BASIC;
3324 	}
3325 }
3326 
3327 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3328 {
3329 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3330 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3331 }
3332 
3333 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3334 {
3335 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3336 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3337 }
3338 
3339 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3340 				      struct l2cap_conf_rfc *rfc)
3341 {
3342 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3343 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3344 
3345 		/* Class 1 devices must have ERTM timeouts
3346 		 * exceeding the Link Supervision Timeout.  The
3347 		 * default Link Supervision Timeout for AMP
3348 		 * controllers is 10 seconds.
3349 		 *
3350 		 * Class 1 devices use 0xffffffff for their
3351 		 * best-effort flush timeout, so the clamping logic
3352 		 * will result in a timeout that meets the above
3353 		 * requirement.  ERTM timeouts are 16-bit values, so
3354 		 * the maximum timeout is 65.535 seconds.
3355 		 */
3356 
3357 		/* Convert timeout to milliseconds and round */
3358 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3359 
3360 		/* This is the recommended formula for class 2 devices
3361 		 * that start ERTM timers when packets are sent to the
3362 		 * controller.
3363 		 */
3364 		ertm_to = 3 * ertm_to + 500;
3365 
3366 		if (ertm_to > 0xffff)
3367 			ertm_to = 0xffff;
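		/* E.g. a best-effort flush timeout of 0xffffffff us rounds up
		 * to 4294968 ms, and 3 * 4294968 + 500 far exceeds 0xffff, so
		 * the timeout is clamped to 65535 ms (65.535 s).
		 */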
3368 
3369 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3370 		rfc->monitor_timeout = rfc->retrans_timeout;
3371 	} else {
3372 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3373 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3374 	}
3375 }
3376 
3377 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3378 {
3379 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3380 	    __l2cap_ews_supported(chan->conn)) {
3381 		/* use extended control field */
3382 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3383 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
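		/* The extended window (L2CAP_DEFAULT_EXT_WINDOW, 0x3fff)
		 * requires the extended control field; the enhanced control
		 * field limits the window to L2CAP_DEFAULT_TX_WINDOW (63).
		 */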
3384 	} else {
3385 		chan->tx_win = min_t(u16, chan->tx_win,
3386 				     L2CAP_DEFAULT_TX_WINDOW);
3387 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3388 	}
3389 	chan->ack_win = chan->tx_win;
3390 }
3391 
3392 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3393 {
3394 	struct hci_conn *conn = chan->conn->hcon;
3395 
3396 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3397 
3398 	/* The 2-DH1 packet has between 2 and 56 information bytes
3399 	 * (including the 2-byte payload header)
3400 	 */
3401 	if (!(conn->pkt_type & HCI_2DH1))
3402 		chan->imtu = 54;
3403 
3404 	/* The 3-DH1 packet has between 2 and 85 information bytes
3405 	 * (including the 2-byte payload header)
3406 	 */
3407 	if (!(conn->pkt_type & HCI_3DH1))
3408 		chan->imtu = 83;
3409 
3410 	/* The 2-DH3 packet has between 2 and 369 information bytes
3411 	 * (including the 2-byte payload header)
3412 	 */
3413 	if (!(conn->pkt_type & HCI_2DH3))
3414 		chan->imtu = 367;
3415 
3416 	/* The 3-DH3 packet has between 2 and 554 information bytes
3417 	 * (including the 2-byte payload header)
3418 	 */
3419 	if (!(conn->pkt_type & HCI_3DH3))
3420 		chan->imtu = 552;
3421 
3422 	/* The 2-DH5 packet has between 2 and 681 information bytes
3423 	 * (including the 2-byte payload header)
3424 	 */
3425 	if (!(conn->pkt_type & HCI_2DH5))
3426 		chan->imtu = 679;
3427 
3428 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3429 	 * (including the 2-byte payload header)
3430 	 */
3431 	if (!(conn->pkt_type & HCI_3DH5))
3432 		chan->imtu = 1021;
3433 }
3434 
3435 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3436 {
3437 	struct l2cap_conf_req *req = data;
3438 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3439 	void *ptr = req->data;
3440 	void *endptr = data + data_size;
3441 	u16 size;
3442 
3443 	BT_DBG("chan %p", chan);
3444 
3445 	if (chan->num_conf_req || chan->num_conf_rsp)
3446 		goto done;
3447 
3448 	switch (chan->mode) {
3449 	case L2CAP_MODE_STREAMING:
3450 	case L2CAP_MODE_ERTM:
3451 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3452 			break;
3453 
3454 		if (__l2cap_efs_supported(chan->conn))
3455 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3456 
3457 		fallthrough;
3458 	default:
3459 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3460 		break;
3461 	}
3462 
3463 done:
3464 	if (chan->imtu != L2CAP_DEFAULT_MTU) {
3465 		if (!chan->imtu)
3466 			l2cap_mtu_auto(chan);
3467 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3468 				   endptr - ptr);
3469 	}
3470 
3471 	switch (chan->mode) {
3472 	case L2CAP_MODE_BASIC:
3473 		if (disable_ertm)
3474 			break;
3475 
3476 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3477 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3478 			break;
3479 
3480 		rfc.mode            = L2CAP_MODE_BASIC;
3481 		rfc.txwin_size      = 0;
3482 		rfc.max_transmit    = 0;
3483 		rfc.retrans_timeout = 0;
3484 		rfc.monitor_timeout = 0;
3485 		rfc.max_pdu_size    = 0;
3486 
3487 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3488 				   (unsigned long) &rfc, endptr - ptr);
3489 		break;
3490 
3491 	case L2CAP_MODE_ERTM:
3492 		rfc.mode            = L2CAP_MODE_ERTM;
3493 		rfc.max_transmit    = chan->max_tx;
3494 
3495 		__l2cap_set_ertm_timeouts(chan, &rfc);
3496 
3497 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3498 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3499 			     L2CAP_FCS_SIZE);
3500 		rfc.max_pdu_size = cpu_to_le16(size);
3501 
3502 		l2cap_txwin_setup(chan);
3503 
3504 		rfc.txwin_size = min_t(u16, chan->tx_win,
3505 				       L2CAP_DEFAULT_TX_WINDOW);
3506 
3507 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3508 				   (unsigned long) &rfc, endptr - ptr);
3509 
3510 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3511 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3512 
3513 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3514 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3515 					   chan->tx_win, endptr - ptr);
3516 
3517 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3518 			if (chan->fcs == L2CAP_FCS_NONE ||
3519 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3520 				chan->fcs = L2CAP_FCS_NONE;
3521 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3522 						   chan->fcs, endptr - ptr);
3523 			}
3524 		break;
3525 
3526 	case L2CAP_MODE_STREAMING:
3527 		l2cap_txwin_setup(chan);
3528 		rfc.mode            = L2CAP_MODE_STREAMING;
3529 		rfc.txwin_size      = 0;
3530 		rfc.max_transmit    = 0;
3531 		rfc.retrans_timeout = 0;
3532 		rfc.monitor_timeout = 0;
3533 
3534 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3535 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3536 			     L2CAP_FCS_SIZE);
3537 		rfc.max_pdu_size = cpu_to_le16(size);
3538 
3539 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3540 				   (unsigned long) &rfc, endptr - ptr);
3541 
3542 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3543 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3544 
3545 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3546 			if (chan->fcs == L2CAP_FCS_NONE ||
3547 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3548 				chan->fcs = L2CAP_FCS_NONE;
3549 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3550 						   chan->fcs, endptr - ptr);
3551 			}
3552 		break;
3553 	}
3554 
3555 	req->dcid  = cpu_to_le16(chan->dcid);
3556 	req->flags = cpu_to_le16(0);
3557 
3558 	return ptr - data;
3559 }
3560 
3561 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3562 {
3563 	struct l2cap_conf_rsp *rsp = data;
3564 	void *ptr = rsp->data;
3565 	void *endptr = data + data_size;
3566 	void *req = chan->conf_req;
3567 	int len = chan->conf_len;
3568 	int type, hint, olen;
3569 	unsigned long val;
3570 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3571 	struct l2cap_conf_efs efs;
3572 	u8 remote_efs = 0;
3573 	u16 mtu = L2CAP_DEFAULT_MTU;
3574 	u16 result = L2CAP_CONF_SUCCESS;
3575 	u16 size;
3576 
3577 	BT_DBG("chan %p", chan);
3578 
3579 	while (len >= L2CAP_CONF_OPT_SIZE) {
3580 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3581 		if (len < 0)
3582 			break;
3583 
3584 		hint  = type & L2CAP_CONF_HINT;
3585 		type &= L2CAP_CONF_MASK;
3586 
3587 		switch (type) {
3588 		case L2CAP_CONF_MTU:
3589 			if (olen != 2)
3590 				break;
3591 			mtu = val;
3592 			break;
3593 
3594 		case L2CAP_CONF_FLUSH_TO:
3595 			if (olen != 2)
3596 				break;
3597 			chan->flush_to = val;
3598 			break;
3599 
3600 		case L2CAP_CONF_QOS:
3601 			break;
3602 
3603 		case L2CAP_CONF_RFC:
3604 			if (olen != sizeof(rfc))
3605 				break;
3606 			memcpy(&rfc, (void *) val, olen);
3607 			break;
3608 
3609 		case L2CAP_CONF_FCS:
3610 			if (olen != 1)
3611 				break;
3612 			if (val == L2CAP_FCS_NONE)
3613 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3614 			break;
3615 
3616 		case L2CAP_CONF_EFS:
3617 			if (olen != sizeof(efs))
3618 				break;
3619 			remote_efs = 1;
3620 			memcpy(&efs, (void *) val, olen);
3621 			break;
3622 
3623 		case L2CAP_CONF_EWS:
3624 			if (olen != 2)
3625 				break;
3626 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3627 				return -ECONNREFUSED;
3628 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3629 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3630 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3631 			chan->remote_tx_win = val;
3632 			break;
3633 
3634 		default:
3635 			if (hint)
3636 				break;
3637 			result = L2CAP_CONF_UNKNOWN;
3638 			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
3639 			break;
3640 		}
3641 	}
3642 
3643 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3644 		goto done;
3645 
3646 	switch (chan->mode) {
3647 	case L2CAP_MODE_STREAMING:
3648 	case L2CAP_MODE_ERTM:
3649 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3650 			chan->mode = l2cap_select_mode(rfc.mode,
3651 						       chan->conn->feat_mask);
3652 			break;
3653 		}
3654 
3655 		if (remote_efs) {
3656 			if (__l2cap_efs_supported(chan->conn))
3657 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3658 			else
3659 				return -ECONNREFUSED;
3660 		}
3661 
3662 		if (chan->mode != rfc.mode)
3663 			return -ECONNREFUSED;
3664 
3665 		break;
3666 	}
3667 
3668 done:
3669 	if (chan->mode != rfc.mode) {
3670 		result = L2CAP_CONF_UNACCEPT;
3671 		rfc.mode = chan->mode;
3672 
3673 		if (chan->num_conf_rsp == 1)
3674 			return -ECONNREFUSED;
3675 
3676 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3677 				   (unsigned long) &rfc, endptr - ptr);
3678 	}
3679 
3680 	if (result == L2CAP_CONF_SUCCESS) {
3681 		/* Configure output options and let the other side know
3682 		 * which ones we don't like. */
3683 
3684 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3685 			result = L2CAP_CONF_UNACCEPT;
3686 		else {
3687 			chan->omtu = mtu;
3688 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3689 		}
3690 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3691 
3692 		if (remote_efs) {
3693 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3694 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3695 			    efs.stype != chan->local_stype) {
3696 
3697 				result = L2CAP_CONF_UNACCEPT;
3698 
3699 				if (chan->num_conf_req >= 1)
3700 					return -ECONNREFUSED;
3701 
3702 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3703 						   sizeof(efs),
3704 						   (unsigned long) &efs, endptr - ptr);
3705 			} else {
3706 				/* Send PENDING Conf Rsp */
3707 				result = L2CAP_CONF_PENDING;
3708 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3709 			}
3710 		}
3711 
3712 		switch (rfc.mode) {
3713 		case L2CAP_MODE_BASIC:
3714 			chan->fcs = L2CAP_FCS_NONE;
3715 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3716 			break;
3717 
3718 		case L2CAP_MODE_ERTM:
3719 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3720 				chan->remote_tx_win = rfc.txwin_size;
3721 			else
3722 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3723 
3724 			chan->remote_max_tx = rfc.max_transmit;
3725 
3726 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3727 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3728 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3729 			rfc.max_pdu_size = cpu_to_le16(size);
3730 			chan->remote_mps = size;
3731 
3732 			__l2cap_set_ertm_timeouts(chan, &rfc);
3733 
3734 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3735 
3736 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3737 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3738 
3739 			if (remote_efs &&
3740 			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3741 				chan->remote_id = efs.id;
3742 				chan->remote_stype = efs.stype;
3743 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3744 				chan->remote_flush_to =
3745 					le32_to_cpu(efs.flush_to);
3746 				chan->remote_acc_lat =
3747 					le32_to_cpu(efs.acc_lat);
3748 				chan->remote_sdu_itime =
3749 					le32_to_cpu(efs.sdu_itime);
3750 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3751 						   sizeof(efs),
3752 						   (unsigned long) &efs, endptr - ptr);
3753 			}
3754 			break;
3755 
3756 		case L2CAP_MODE_STREAMING:
3757 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3758 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3759 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3760 			rfc.max_pdu_size = cpu_to_le16(size);
3761 			chan->remote_mps = size;
3762 
3763 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3764 
3765 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3766 					   (unsigned long) &rfc, endptr - ptr);
3767 
3768 			break;
3769 
3770 		default:
3771 			result = L2CAP_CONF_UNACCEPT;
3772 
3773 			memset(&rfc, 0, sizeof(rfc));
3774 			rfc.mode = chan->mode;
3775 		}
3776 
3777 		if (result == L2CAP_CONF_SUCCESS)
3778 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3779 	}
3780 	rsp->scid   = cpu_to_le16(chan->dcid);
3781 	rsp->result = cpu_to_le16(result);
3782 	rsp->flags  = cpu_to_le16(0);
3783 
3784 	return ptr - data;
3785 }
3786 
3787 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3788 				void *data, size_t size, u16 *result)
3789 {
3790 	struct l2cap_conf_req *req = data;
3791 	void *ptr = req->data;
3792 	void *endptr = data + size;
3793 	int type, olen;
3794 	unsigned long val;
3795 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3796 	struct l2cap_conf_efs efs;
3797 
3798 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3799 
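	/* Walk the options from the peer's Configure Response and build a new
	 * Configure Request that reflects the values we can accept, adjusting
	 * our own settings (MTU, flush timeout, RFC, window size) on the way.
	 */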
3800 	while (len >= L2CAP_CONF_OPT_SIZE) {
3801 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3802 		if (len < 0)
3803 			break;
3804 
3805 		switch (type) {
3806 		case L2CAP_CONF_MTU:
3807 			if (olen != 2)
3808 				break;
3809 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3810 				*result = L2CAP_CONF_UNACCEPT;
3811 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3812 			} else
3813 				chan->imtu = val;
3814 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3815 					   endptr - ptr);
3816 			break;
3817 
3818 		case L2CAP_CONF_FLUSH_TO:
3819 			if (olen != 2)
3820 				break;
3821 			chan->flush_to = val;
3822 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3823 					   chan->flush_to, endptr - ptr);
3824 			break;
3825 
3826 		case L2CAP_CONF_RFC:
3827 			if (olen != sizeof(rfc))
3828 				break;
3829 			memcpy(&rfc, (void *)val, olen);
3830 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3831 			    rfc.mode != chan->mode)
3832 				return -ECONNREFUSED;
3833 			chan->fcs = 0;
3834 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3835 					   (unsigned long) &rfc, endptr - ptr);
3836 			break;
3837 
3838 		case L2CAP_CONF_EWS:
3839 			if (olen != 2)
3840 				break;
3841 			chan->ack_win = min_t(u16, val, chan->ack_win);
3842 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3843 					   chan->tx_win, endptr - ptr);
3844 			break;
3845 
3846 		case L2CAP_CONF_EFS:
3847 			if (olen != sizeof(efs))
3848 				break;
3849 			memcpy(&efs, (void *)val, olen);
3850 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3851 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3852 			    efs.stype != chan->local_stype)
3853 				return -ECONNREFUSED;
3854 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3855 					   (unsigned long) &efs, endptr - ptr);
3856 			break;
3857 
3858 		case L2CAP_CONF_FCS:
3859 			if (olen != 1)
3860 				break;
3861 			if (*result == L2CAP_CONF_PENDING)
3862 				if (val == L2CAP_FCS_NONE)
3863 					set_bit(CONF_RECV_NO_FCS,
3864 						&chan->conf_state);
3865 			break;
3866 		}
3867 	}
3868 
3869 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3870 		return -ECONNREFUSED;
3871 
3872 	chan->mode = rfc.mode;
3873 
3874 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3875 		switch (rfc.mode) {
3876 		case L2CAP_MODE_ERTM:
3877 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3878 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3879 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3880 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3881 				chan->ack_win = min_t(u16, chan->ack_win,
3882 						      rfc.txwin_size);
3883 
3884 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3885 				chan->local_msdu = le16_to_cpu(efs.msdu);
3886 				chan->local_sdu_itime =
3887 					le32_to_cpu(efs.sdu_itime);
3888 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3889 				chan->local_flush_to =
3890 					le32_to_cpu(efs.flush_to);
3891 			}
3892 			break;
3893 
3894 		case L2CAP_MODE_STREAMING:
3895 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3896 		}
3897 	}
3898 
3899 	req->dcid   = cpu_to_le16(chan->dcid);
3900 	req->flags  = cpu_to_le16(0);
3901 
3902 	return ptr - data;
3903 }
3904 
3905 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3906 				u16 result, u16 flags)
3907 {
3908 	struct l2cap_conf_rsp *rsp = data;
3909 	void *ptr = rsp->data;
3910 
3911 	BT_DBG("chan %p", chan);
3912 
3913 	rsp->scid   = cpu_to_le16(chan->dcid);
3914 	rsp->result = cpu_to_le16(result);
3915 	rsp->flags  = cpu_to_le16(flags);
3916 
3917 	return ptr - data;
3918 }
3919 
3920 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3921 {
3922 	struct l2cap_le_conn_rsp rsp;
3923 	struct l2cap_conn *conn = chan->conn;
3924 
3925 	BT_DBG("chan %p", chan);
3926 
3927 	rsp.dcid    = cpu_to_le16(chan->scid);
3928 	rsp.mtu     = cpu_to_le16(chan->imtu);
3929 	rsp.mps     = cpu_to_le16(chan->mps);
3930 	rsp.credits = cpu_to_le16(chan->rx_credits);
3931 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3932 
3933 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3934 		       &rsp);
3935 }
3936 
3937 static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
3938 {
3939 	int *result = data;
3940 
3941 	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3942 		return;
3943 
3944 	switch (chan->state) {
3945 	case BT_CONNECT2:
3946 		/* If the channel is still pending accept, count it in the result */
3947 		(*result)++;
3948 		return;
3949 	case BT_CONNECTED:
3950 		return;
3951 	default:
3952 		/* Neither connected nor pending accept, so it has been refused */
3953 		*result = -ECONNREFUSED;
3954 		return;
3955 	}
3956 }
3957 
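/* Scratch buffer used while building a deferred Enhanced Credit Based
 * connection response: the response header followed by one CID entry for
 * each channel accepted under the same ident.
 */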
3958 struct l2cap_ecred_rsp_data {
3959 	struct {
3960 		struct l2cap_ecred_conn_rsp rsp;
3961 		__le16 scid[L2CAP_ECRED_MAX_CID];
3962 	} __packed pdu;
3963 	int count;
3964 };
3965 
3966 static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
3967 {
3968 	struct l2cap_ecred_rsp_data *rsp = data;
3969 
3970 	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3971 		return;
3972 
3973 	/* Reset ident so only one response is sent */
3974 	chan->ident = 0;
3975 
3976 	/* Include all channels pending with the same ident */
3977 	if (!rsp->pdu.rsp.result)
3978 		rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
3979 	else
3980 		l2cap_chan_del(chan, ECONNRESET);
3981 }
3982 
3983 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3984 {
3985 	struct l2cap_conn *conn = chan->conn;
3986 	struct l2cap_ecred_rsp_data data;
3987 	u16 id = chan->ident;
3988 	int result = 0;
3989 
3990 	if (!id)
3991 		return;
3992 
3993 	BT_DBG("chan %p id %d", chan, id);
3994 
3995 	memset(&data, 0, sizeof(data));
3996 
3997 	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3998 	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
3999 	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
4000 	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
4001 
4002 	/* Verify that all channels are ready */
4003 	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);
4004 
4005 	if (result > 0)
4006 		return;
4007 
4008 	if (result < 0)
4009 		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);
4010 
4011 	/* Build response */
4012 	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);
4013 
4014 	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
4015 		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
4016 		       &data.pdu);
4017 }
4018 
4019 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4020 {
4021 	struct l2cap_conn_rsp rsp;
4022 	struct l2cap_conn *conn = chan->conn;
4023 	u8 buf[128];
4024 	u8 rsp_code;
4025 
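	/* Deferred accept: send the Connect (or Create Channel) Response that
	 * was held back, then start configuration unless a Configure Request
	 * has already been sent.
	 */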
4026 	rsp.scid   = cpu_to_le16(chan->dcid);
4027 	rsp.dcid   = cpu_to_le16(chan->scid);
4028 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4029 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4030 
4031 	if (chan->hs_hcon)
4032 		rsp_code = L2CAP_CREATE_CHAN_RSP;
4033 	else
4034 		rsp_code = L2CAP_CONN_RSP;
4035 
4036 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4037 
4038 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
4039 
4040 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4041 		return;
4042 
4043 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4044 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4045 	chan->num_conf_req++;
4046 }
4047 
4048 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4049 {
4050 	int type, olen;
4051 	unsigned long val;
4052 	/* Use sane default values in case a misbehaving remote device
4053 	 * did not send an RFC or extended window size option.
4054 	 */
4055 	u16 txwin_ext = chan->ack_win;
4056 	struct l2cap_conf_rfc rfc = {
4057 		.mode = chan->mode,
4058 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4059 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4060 		.max_pdu_size = cpu_to_le16(chan->imtu),
4061 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4062 	};
4063 
4064 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4065 
4066 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4067 		return;
4068 
4069 	while (len >= L2CAP_CONF_OPT_SIZE) {
4070 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4071 		if (len < 0)
4072 			break;
4073 
4074 		switch (type) {
4075 		case L2CAP_CONF_RFC:
4076 			if (olen != sizeof(rfc))
4077 				break;
4078 			memcpy(&rfc, (void *)val, olen);
4079 			break;
4080 		case L2CAP_CONF_EWS:
4081 			if (olen != 2)
4082 				break;
4083 			txwin_ext = val;
4084 			break;
4085 		}
4086 	}
4087 
4088 	switch (rfc.mode) {
4089 	case L2CAP_MODE_ERTM:
4090 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4091 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4092 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
4093 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4094 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4095 		else
4096 			chan->ack_win = min_t(u16, chan->ack_win,
4097 					      rfc.txwin_size);
4098 		break;
4099 	case L2CAP_MODE_STREAMING:
4100 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
4101 	}
4102 }
4103 
4104 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4105 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4106 				    u8 *data)
4107 {
4108 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4109 
4110 	if (cmd_len < sizeof(*rej))
4111 		return -EPROTO;
4112 
4113 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4114 		return 0;
4115 
4116 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4117 	    cmd->ident == conn->info_ident) {
4118 		cancel_delayed_work(&conn->info_timer);
4119 
4120 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4121 		conn->info_ident = 0;
4122 
4123 		l2cap_conn_start(conn);
4124 	}
4125 
4126 	return 0;
4127 }
4128 
4129 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4130 					struct l2cap_cmd_hdr *cmd,
4131 					u8 *data, u8 rsp_code, u8 amp_id)
4132 {
4133 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4134 	struct l2cap_conn_rsp rsp;
4135 	struct l2cap_chan *chan = NULL, *pchan;
4136 	int result, status = L2CAP_CS_NO_INFO;
4137 
4138 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4139 	__le16 psm = req->psm;
4140 
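	/* Incoming connection request: find a channel listening on the PSM,
	 * validate security and the requested source CID, then create a new
	 * channel and answer with success, pending or a refusal.
	 */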
4141 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4142 
4143 	/* Check if we have a socket listening on this PSM */
4144 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4145 					 &conn->hcon->dst, ACL_LINK);
4146 	if (!pchan) {
4147 		result = L2CAP_CR_BAD_PSM;
4148 		goto sendresp;
4149 	}
4150 
4151 	mutex_lock(&conn->chan_lock);
4152 	l2cap_chan_lock(pchan);
4153 
4154 	/* Check if the ACL is secure enough (if not SDP) */
4155 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4156 	    !hci_conn_check_link_mode(conn->hcon)) {
4157 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4158 		result = L2CAP_CR_SEC_BLOCK;
4159 		goto response;
4160 	}
4161 
4162 	result = L2CAP_CR_NO_MEM;
4163 
4164 	/* Check for valid dynamic CID range (as per Erratum 3253) */
4165 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4166 		result = L2CAP_CR_INVALID_SCID;
4167 		goto response;
4168 	}
4169 
4170 	/* Check if we already have a channel with that DCID */
4171 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
4172 		result = L2CAP_CR_SCID_IN_USE;
4173 		goto response;
4174 	}
4175 
4176 	chan = pchan->ops->new_connection(pchan);
4177 	if (!chan)
4178 		goto response;
4179 
4180 	/* For certain devices (e.g. a HID mouse), support for authentication,
4181 	 * pairing and bonding is optional. For such devices, in order to avoid
4182 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
4183 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4184 	 */
4185 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4186 
4187 	bacpy(&chan->src, &conn->hcon->src);
4188 	bacpy(&chan->dst, &conn->hcon->dst);
4189 	chan->src_type = bdaddr_src_type(conn->hcon);
4190 	chan->dst_type = bdaddr_dst_type(conn->hcon);
4191 	chan->psm  = psm;
4192 	chan->dcid = scid;
4193 	chan->local_amp_id = amp_id;
4194 
4195 	__l2cap_chan_add(conn, chan);
4196 
4197 	dcid = chan->scid;
4198 
4199 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4200 
4201 	chan->ident = cmd->ident;
4202 
4203 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4204 		if (l2cap_chan_check_security(chan, false)) {
4205 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4206 				l2cap_state_change(chan, BT_CONNECT2);
4207 				result = L2CAP_CR_PEND;
4208 				status = L2CAP_CS_AUTHOR_PEND;
4209 				chan->ops->defer(chan);
4210 			} else {
4211 				/* Force pending result for AMP controllers.
4212 				 * The connection will succeed after the
4213 				 * physical link is up.
4214 				 */
4215 				if (amp_id == AMP_ID_BREDR) {
4216 					l2cap_state_change(chan, BT_CONFIG);
4217 					result = L2CAP_CR_SUCCESS;
4218 				} else {
4219 					l2cap_state_change(chan, BT_CONNECT2);
4220 					result = L2CAP_CR_PEND;
4221 				}
4222 				status = L2CAP_CS_NO_INFO;
4223 			}
4224 		} else {
4225 			l2cap_state_change(chan, BT_CONNECT2);
4226 			result = L2CAP_CR_PEND;
4227 			status = L2CAP_CS_AUTHEN_PEND;
4228 		}
4229 	} else {
4230 		l2cap_state_change(chan, BT_CONNECT2);
4231 		result = L2CAP_CR_PEND;
4232 		status = L2CAP_CS_NO_INFO;
4233 	}
4234 
4235 response:
4236 	l2cap_chan_unlock(pchan);
4237 	mutex_unlock(&conn->chan_lock);
4238 	l2cap_chan_put(pchan);
4239 
4240 sendresp:
4241 	rsp.scid   = cpu_to_le16(scid);
4242 	rsp.dcid   = cpu_to_le16(dcid);
4243 	rsp.result = cpu_to_le16(result);
4244 	rsp.status = cpu_to_le16(status);
4245 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4246 
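	/* While the connection is left pending, query the remote feature
	 * mask so configuration can proceed once the answer arrives.
	 */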
4247 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4248 		struct l2cap_info_req info;
4249 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4250 
4251 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4252 		conn->info_ident = l2cap_get_ident(conn);
4253 
4254 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4255 
4256 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4257 			       sizeof(info), &info);
4258 	}
4259 
4260 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4261 	    result == L2CAP_CR_SUCCESS) {
4262 		u8 buf[128];
4263 		set_bit(CONF_REQ_SENT, &chan->conf_state);
4264 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4265 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4266 		chan->num_conf_req++;
4267 	}
4268 
4269 	return chan;
4270 }
4271 
4272 static int l2cap_connect_req(struct l2cap_conn *conn,
4273 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4274 {
4275 	struct hci_dev *hdev = conn->hcon->hdev;
4276 	struct hci_conn *hcon = conn->hcon;
4277 
4278 	if (cmd_len < sizeof(struct l2cap_conn_req))
4279 		return -EPROTO;
4280 
4281 	hci_dev_lock(hdev);
4282 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4283 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4284 		mgmt_device_connected(hdev, hcon, NULL, 0);
4285 	hci_dev_unlock(hdev);
4286 
4287 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4288 	return 0;
4289 }
4290 
4291 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4292 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4293 				    u8 *data)
4294 {
4295 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4296 	u16 scid, dcid, result, status;
4297 	struct l2cap_chan *chan;
4298 	u8 req[128];
4299 	int err;
4300 
4301 	if (cmd_len < sizeof(*rsp))
4302 		return -EPROTO;
4303 
4304 	scid   = __le16_to_cpu(rsp->scid);
4305 	dcid   = __le16_to_cpu(rsp->dcid);
4306 	result = __le16_to_cpu(rsp->result);
4307 	status = __le16_to_cpu(rsp->status);
4308 
4309 	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
4310 					   dcid > L2CAP_CID_DYN_END))
4311 		return -EPROTO;
4312 
4313 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4314 	       dcid, scid, result, status);
4315 
4316 	mutex_lock(&conn->chan_lock);
4317 
4318 	if (scid) {
4319 		chan = __l2cap_get_chan_by_scid(conn, scid);
4320 		if (!chan) {
4321 			err = -EBADSLT;
4322 			goto unlock;
4323 		}
4324 	} else {
4325 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4326 		if (!chan) {
4327 			err = -EBADSLT;
4328 			goto unlock;
4329 		}
4330 	}
4331 
4332 	chan = l2cap_chan_hold_unless_zero(chan);
4333 	if (!chan) {
4334 		err = -EBADSLT;
4335 		goto unlock;
4336 	}
4337 
4338 	err = 0;
4339 
4340 	l2cap_chan_lock(chan);
4341 
4342 	switch (result) {
4343 	case L2CAP_CR_SUCCESS:
4344 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
4345 			err = -EBADSLT;
4346 			break;
4347 		}
4348 
4349 		l2cap_state_change(chan, BT_CONFIG);
4350 		chan->ident = 0;
4351 		chan->dcid = dcid;
4352 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4353 
4354 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4355 			break;
4356 
4357 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4358 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4359 		chan->num_conf_req++;
4360 		break;
4361 
4362 	case L2CAP_CR_PEND:
4363 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4364 		break;
4365 
4366 	default:
4367 		l2cap_chan_del(chan, ECONNREFUSED);
4368 		break;
4369 	}
4370 
4371 	l2cap_chan_unlock(chan);
4372 	l2cap_chan_put(chan);
4373 
4374 unlock:
4375 	mutex_unlock(&conn->chan_lock);
4376 
4377 	return err;
4378 }
4379 
4380 static inline void set_default_fcs(struct l2cap_chan *chan)
4381 {
4382 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4383 	 * sides request it.
4384 	 */
4385 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4386 		chan->fcs = L2CAP_FCS_NONE;
4387 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4388 		chan->fcs = L2CAP_FCS_CRC16;
4389 }
4390 
4391 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4392 				    u8 ident, u16 flags)
4393 {
4394 	struct l2cap_conn *conn = chan->conn;
4395 
4396 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4397 	       flags);
4398 
4399 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4400 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4401 
4402 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4403 		       l2cap_build_conf_rsp(chan, data,
4404 					    L2CAP_CONF_SUCCESS, flags), data);
4405 }
4406 
4407 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4408 				   u16 scid, u16 dcid)
4409 {
4410 	struct l2cap_cmd_rej_cid rej;
4411 
4412 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4413 	rej.scid = __cpu_to_le16(scid);
4414 	rej.dcid = __cpu_to_le16(dcid);
4415 
4416 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4417 }
4418 
4419 static inline int l2cap_config_req(struct l2cap_conn *conn,
4420 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4421 				   u8 *data)
4422 {
4423 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4424 	u16 dcid, flags;
4425 	u8 rsp[64];
4426 	struct l2cap_chan *chan;
4427 	int len, err = 0;
4428 
4429 	if (cmd_len < sizeof(*req))
4430 		return -EPROTO;
4431 
4432 	dcid  = __le16_to_cpu(req->dcid);
4433 	flags = __le16_to_cpu(req->flags);
4434 
4435 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4436 
4437 	chan = l2cap_get_chan_by_scid(conn, dcid);
4438 	if (!chan) {
4439 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4440 		return 0;
4441 	}
4442 
4443 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4444 	    chan->state != BT_CONNECTED) {
4445 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4446 				       chan->dcid);
4447 		goto unlock;
4448 	}
4449 
4450 	/* Reject if config buffer is too small. */
4451 	len = cmd_len - sizeof(*req);
4452 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4453 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4454 			       l2cap_build_conf_rsp(chan, rsp,
4455 			       L2CAP_CONF_REJECT, flags), rsp);
4456 		goto unlock;
4457 	}
4458 
4459 	/* Store config. */
4460 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4461 	chan->conf_len += len;
4462 
4463 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4464 		/* Incomplete config. Send empty response. */
4465 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4466 			       l2cap_build_conf_rsp(chan, rsp,
4467 			       L2CAP_CONF_SUCCESS, flags), rsp);
4468 		goto unlock;
4469 	}
4470 
4471 	/* Complete config. */
4472 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4473 	if (len < 0) {
4474 		l2cap_send_disconn_req(chan, ECONNRESET);
4475 		goto unlock;
4476 	}
4477 
4478 	chan->ident = cmd->ident;
4479 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4480 	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4481 		chan->num_conf_rsp++;
4482 
4483 	/* Reset config buffer. */
4484 	chan->conf_len = 0;
4485 
4486 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4487 		goto unlock;
4488 
4489 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4490 		set_default_fcs(chan);
4491 
4492 		if (chan->mode == L2CAP_MODE_ERTM ||
4493 		    chan->mode == L2CAP_MODE_STREAMING)
4494 			err = l2cap_ertm_init(chan);
4495 
4496 		if (err < 0)
4497 			l2cap_send_disconn_req(chan, -err);
4498 		else
4499 			l2cap_chan_ready(chan);
4500 
4501 		goto unlock;
4502 	}
4503 
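	/* Our half of the configuration is done; if we have not sent our own
	 * Configure Request yet, do so now.
	 */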
4504 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4505 		u8 buf[64];
4506 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4507 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4508 		chan->num_conf_req++;
4509 	}
4510 
4511 	/* Got Conf Rsp PENDING from the remote side and assume we sent
4512 	 * Conf Rsp PENDING in the code above */
4513 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4514 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4515 
4516 		/* check compatibility */
4517 
4518 		/* Send rsp for BR/EDR channel */
4519 		if (!chan->hs_hcon)
4520 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4521 		else
4522 			chan->ident = cmd->ident;
4523 	}
4524 
4525 unlock:
4526 	l2cap_chan_unlock(chan);
4527 	l2cap_chan_put(chan);
4528 	return err;
4529 }
4530 
4531 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4532 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4533 				   u8 *data)
4534 {
4535 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4536 	u16 scid, flags, result;
4537 	struct l2cap_chan *chan;
4538 	int len = cmd_len - sizeof(*rsp);
4539 	int err = 0;
4540 
4541 	if (cmd_len < sizeof(*rsp))
4542 		return -EPROTO;
4543 
4544 	scid   = __le16_to_cpu(rsp->scid);
4545 	flags  = __le16_to_cpu(rsp->flags);
4546 	result = __le16_to_cpu(rsp->result);
4547 
4548 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4549 	       result, len);
4550 
4551 	chan = l2cap_get_chan_by_scid(conn, scid);
4552 	if (!chan)
4553 		return 0;
4554 
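	/* SUCCESS records the negotiated RFC values, PENDING may require an
	 * EFS response once our side is also pending, UNKNOWN/UNACCEPT allow
	 * a bounded number of retries with an adjusted request, and anything
	 * else tears the channel down.
	 */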
4555 	switch (result) {
4556 	case L2CAP_CONF_SUCCESS:
4557 		l2cap_conf_rfc_get(chan, rsp->data, len);
4558 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4559 		break;
4560 
4561 	case L2CAP_CONF_PENDING:
4562 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4563 
4564 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4565 			char buf[64];
4566 
4567 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4568 						   buf, sizeof(buf), &result);
4569 			if (len < 0) {
4570 				l2cap_send_disconn_req(chan, ECONNRESET);
4571 				goto done;
4572 			}
4573 
4574 			if (!chan->hs_hcon) {
4575 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4576 							0);
4577 			} else {
4578 				if (l2cap_check_efs(chan)) {
4579 					amp_create_logical_link(chan);
4580 					chan->ident = cmd->ident;
4581 				}
4582 			}
4583 		}
4584 		goto done;
4585 
4586 	case L2CAP_CONF_UNKNOWN:
4587 	case L2CAP_CONF_UNACCEPT:
4588 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4589 			char req[64];
4590 
4591 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4592 				l2cap_send_disconn_req(chan, ECONNRESET);
4593 				goto done;
4594 			}
4595 
4596 			/* throw out any old stored conf requests */
4597 			result = L2CAP_CONF_SUCCESS;
4598 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4599 						   req, sizeof(req), &result);
4600 			if (len < 0) {
4601 				l2cap_send_disconn_req(chan, ECONNRESET);
4602 				goto done;
4603 			}
4604 
4605 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4606 				       L2CAP_CONF_REQ, len, req);
4607 			chan->num_conf_req++;
4608 			if (result != L2CAP_CONF_SUCCESS)
4609 				goto done;
4610 			break;
4611 		}
4612 		fallthrough;
4613 
4614 	default:
4615 		l2cap_chan_set_err(chan, ECONNRESET);
4616 
4617 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4618 		l2cap_send_disconn_req(chan, ECONNRESET);
4619 		goto done;
4620 	}
4621 
4622 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4623 		goto done;
4624 
4625 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4626 
4627 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4628 		set_default_fcs(chan);
4629 
4630 		if (chan->mode == L2CAP_MODE_ERTM ||
4631 		    chan->mode == L2CAP_MODE_STREAMING)
4632 			err = l2cap_ertm_init(chan);
4633 
4634 		if (err < 0)
4635 			l2cap_send_disconn_req(chan, -err);
4636 		else
4637 			l2cap_chan_ready(chan);
4638 	}
4639 
4640 done:
4641 	l2cap_chan_unlock(chan);
4642 	l2cap_chan_put(chan);
4643 	return err;
4644 }
4645 
4646 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4647 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4648 				       u8 *data)
4649 {
4650 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4651 	struct l2cap_disconn_rsp rsp;
4652 	u16 dcid, scid;
4653 	struct l2cap_chan *chan;
4654 
4655 	if (cmd_len != sizeof(*req))
4656 		return -EPROTO;
4657 
4658 	scid = __le16_to_cpu(req->scid);
4659 	dcid = __le16_to_cpu(req->dcid);
4660 
4661 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4662 
4663 	chan = l2cap_get_chan_by_scid(conn, dcid);
4664 	if (!chan) {
4665 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4666 		return 0;
4667 	}
4668 
4669 	rsp.dcid = cpu_to_le16(chan->scid);
4670 	rsp.scid = cpu_to_le16(chan->dcid);
4671 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4672 
4673 	chan->ops->set_shutdown(chan);
4674 
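	/* Drop the channel lock and retake it under conn->chan_lock so the
	 * channel can be removed from the connection list in the right lock
	 * order.
	 */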
4675 	l2cap_chan_unlock(chan);
4676 	mutex_lock(&conn->chan_lock);
4677 	l2cap_chan_lock(chan);
4678 	l2cap_chan_del(chan, ECONNRESET);
4679 	mutex_unlock(&conn->chan_lock);
4680 
4681 	chan->ops->close(chan);
4682 
4683 	l2cap_chan_unlock(chan);
4684 	l2cap_chan_put(chan);
4685 
4686 	return 0;
4687 }
4688 
4689 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4690 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4691 				       u8 *data)
4692 {
4693 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4694 	u16 dcid, scid;
4695 	struct l2cap_chan *chan;
4696 
4697 	if (cmd_len != sizeof(*rsp))
4698 		return -EPROTO;
4699 
4700 	scid = __le16_to_cpu(rsp->scid);
4701 	dcid = __le16_to_cpu(rsp->dcid);
4702 
4703 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4704 
4705 	chan = l2cap_get_chan_by_scid(conn, scid);
4706 	if (!chan)
4707 		return 0;
4709 
4710 	if (chan->state != BT_DISCONN) {
4711 		l2cap_chan_unlock(chan);
4712 		l2cap_chan_put(chan);
4713 		return 0;
4714 	}
4715 
4716 	l2cap_chan_unlock(chan);
4717 	mutex_lock(&conn->chan_lock);
4718 	l2cap_chan_lock(chan);
4719 	l2cap_chan_del(chan, 0);
4720 	mutex_unlock(&conn->chan_lock);
4721 
4722 	chan->ops->close(chan);
4723 
4724 	l2cap_chan_unlock(chan);
4725 	l2cap_chan_put(chan);
4726 
4727 	return 0;
4728 }
4729 
4730 static inline int l2cap_information_req(struct l2cap_conn *conn,
4731 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4732 					u8 *data)
4733 {
4734 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4735 	u16 type;
4736 
4737 	if (cmd_len != sizeof(*req))
4738 		return -EPROTO;
4739 
4740 	type = __le16_to_cpu(req->type);
4741 
4742 	BT_DBG("type 0x%4.4x", type);
4743 
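	/* Answer feature mask and fixed channel queries; any other type gets
	 * a "not supported" response.
	 */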
4744 	if (type == L2CAP_IT_FEAT_MASK) {
4745 		u8 buf[8];
4746 		u32 feat_mask = l2cap_feat_mask;
4747 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4748 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4749 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4750 		if (!disable_ertm)
4751 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4752 				| L2CAP_FEAT_FCS;
4753 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4754 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4755 				| L2CAP_FEAT_EXT_WINDOW;
4756 
4757 		put_unaligned_le32(feat_mask, rsp->data);
4758 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4759 			       buf);
4760 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4761 		u8 buf[12];
4762 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4763 
4764 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4765 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4766 		rsp->data[0] = conn->local_fixed_chan;
4767 		memset(rsp->data + 1, 0, 7);
4768 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4769 			       buf);
4770 	} else {
4771 		struct l2cap_info_rsp rsp;
4772 		rsp.type   = cpu_to_le16(type);
4773 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4774 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4775 			       &rsp);
4776 	}
4777 
4778 	return 0;
4779 }
4780 
4781 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4782 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4783 					u8 *data)
4784 {
4785 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4786 	u16 type, result;
4787 
4788 	if (cmd_len < sizeof(*rsp))
4789 		return -EPROTO;
4790 
4791 	type   = __le16_to_cpu(rsp->type);
4792 	result = __le16_to_cpu(rsp->result);
4793 
4794 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4795 
4796 	/* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
4797 	if (cmd->ident != conn->info_ident ||
4798 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4799 		return 0;
4800 
4801 	cancel_delayed_work(&conn->info_timer);
4802 
4803 	if (result != L2CAP_IR_SUCCESS) {
4804 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4805 		conn->info_ident = 0;
4806 
4807 		l2cap_conn_start(conn);
4808 
4809 		return 0;
4810 	}
4811 
4812 	switch (type) {
4813 	case L2CAP_IT_FEAT_MASK:
4814 		conn->feat_mask = get_unaligned_le32(rsp->data);
4815 
4816 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4817 			struct l2cap_info_req req;
4818 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4819 
4820 			conn->info_ident = l2cap_get_ident(conn);
4821 
4822 			l2cap_send_cmd(conn, conn->info_ident,
4823 				       L2CAP_INFO_REQ, sizeof(req), &req);
4824 		} else {
4825 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4826 			conn->info_ident = 0;
4827 
4828 			l2cap_conn_start(conn);
4829 		}
4830 		break;
4831 
4832 	case L2CAP_IT_FIXED_CHAN:
4833 		conn->remote_fixed_chan = rsp->data[0];
4834 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4835 		conn->info_ident = 0;
4836 
4837 		l2cap_conn_start(conn);
4838 		break;
4839 	}
4840 
4841 	return 0;
4842 }
4843 
4844 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4845 				    struct l2cap_cmd_hdr *cmd,
4846 				    u16 cmd_len, void *data)
4847 {
4848 	struct l2cap_create_chan_req *req = data;
4849 	struct l2cap_create_chan_rsp rsp;
4850 	struct l2cap_chan *chan;
4851 	struct hci_dev *hdev;
4852 	u16 psm, scid;
4853 
4854 	if (cmd_len != sizeof(*req))
4855 		return -EPROTO;
4856 
4857 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4858 		return -EINVAL;
4859 
4860 	psm = le16_to_cpu(req->psm);
4861 	scid = le16_to_cpu(req->scid);
4862 
4863 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4864 
4865 	/* For controller id 0 make BR/EDR connection */
4866 	if (req->amp_id == AMP_ID_BREDR) {
4867 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4868 			      req->amp_id);
4869 		return 0;
4870 	}
4871 
4872 	/* Validate AMP controller id */
4873 	hdev = hci_dev_get(req->amp_id);
4874 	if (!hdev)
4875 		goto error;
4876 
4877 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4878 		hci_dev_put(hdev);
4879 		goto error;
4880 	}
4881 
4882 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4883 			     req->amp_id);
4884 	if (chan) {
4885 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4886 		struct hci_conn *hs_hcon;
4887 
4888 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4889 						  &conn->hcon->dst);
4890 		if (!hs_hcon) {
4891 			hci_dev_put(hdev);
4892 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4893 					       chan->dcid);
4894 			return 0;
4895 		}
4896 
4897 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4898 
4899 		mgr->bredr_chan = chan;
4900 		chan->hs_hcon = hs_hcon;
4901 		chan->fcs = L2CAP_FCS_NONE;
4902 		conn->mtu = hdev->block_mtu;
4903 	}
4904 
4905 	hci_dev_put(hdev);
4906 
4907 	return 0;
4908 
4909 error:
4910 	rsp.dcid = 0;
4911 	rsp.scid = cpu_to_le16(scid);
4912 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4913 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4914 
4915 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4916 		       sizeof(rsp), &rsp);
4917 
4918 	return 0;
4919 }
4920 
4921 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4922 {
4923 	struct l2cap_move_chan_req req;
4924 	u8 ident;
4925 
4926 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4927 
4928 	ident = l2cap_get_ident(chan->conn);
4929 	chan->ident = ident;
4930 
4931 	req.icid = cpu_to_le16(chan->scid);
4932 	req.dest_amp_id = dest_amp_id;
4933 
4934 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4935 		       &req);
4936 
4937 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4938 }
4939 
4940 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4941 {
4942 	struct l2cap_move_chan_rsp rsp;
4943 
4944 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4945 
4946 	rsp.icid = cpu_to_le16(chan->dcid);
4947 	rsp.result = cpu_to_le16(result);
4948 
4949 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4950 		       sizeof(rsp), &rsp);
4951 }
4952 
4953 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4954 {
4955 	struct l2cap_move_chan_cfm cfm;
4956 
4957 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4958 
4959 	chan->ident = l2cap_get_ident(chan->conn);
4960 
4961 	cfm.icid = cpu_to_le16(chan->scid);
4962 	cfm.result = cpu_to_le16(result);
4963 
4964 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4965 		       sizeof(cfm), &cfm);
4966 
4967 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4968 }
4969 
4970 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4971 {
4972 	struct l2cap_move_chan_cfm cfm;
4973 
4974 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4975 
4976 	cfm.icid = cpu_to_le16(icid);
4977 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4978 
4979 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4980 		       sizeof(cfm), &cfm);
4981 }
4982 
4983 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4984 					 u16 icid)
4985 {
4986 	struct l2cap_move_chan_cfm_rsp rsp;
4987 
4988 	BT_DBG("icid 0x%4.4x", icid);
4989 
4990 	rsp.icid = cpu_to_le16(icid);
4991 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4992 }
4993 
4994 static void __release_logical_link(struct l2cap_chan *chan)
4995 {
4996 	chan->hs_hchan = NULL;
4997 	chan->hs_hcon = NULL;
4998 
4999 	/* Placeholder - release the logical link */
5000 }
5001 
5002 static void l2cap_logical_fail(struct l2cap_chan *chan)
5003 {
5004 	/* Logical link setup failed */
5005 	if (chan->state != BT_CONNECTED) {
5006 		/* Create channel failure, disconnect */
5007 		l2cap_send_disconn_req(chan, ECONNRESET);
5008 		return;
5009 	}
5010 
5011 	switch (chan->move_role) {
5012 	case L2CAP_MOVE_ROLE_RESPONDER:
5013 		l2cap_move_done(chan);
5014 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
5015 		break;
5016 	case L2CAP_MOVE_ROLE_INITIATOR:
5017 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
5018 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
5019 			/* Remote has only sent pending or
5020 			 * success responses, clean up
5021 			 */
5022 			l2cap_move_done(chan);
5023 		}
5024 
5025 		/* Other amp move states imply that the move
5026 		 * has already aborted
5027 		 */
5028 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5029 		break;
5030 	}
5031 }
5032 
5033 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
5034 					struct hci_chan *hchan)
5035 {
5036 	struct l2cap_conf_rsp rsp;
5037 
5038 	chan->hs_hchan = hchan;
5039 	chan->hs_hcon->l2cap_data = chan->conn;
5040 
5041 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
5042 
5043 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
5044 		int err;
5045 
5046 		set_default_fcs(chan);
5047 
5048 		err = l2cap_ertm_init(chan);
5049 		if (err < 0)
5050 			l2cap_send_disconn_req(chan, -err);
5051 		else
5052 			l2cap_chan_ready(chan);
5053 	}
5054 }
5055 
5056 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5057 				      struct hci_chan *hchan)
5058 {
5059 	chan->hs_hcon = hchan->conn;
5060 	chan->hs_hcon->l2cap_data = chan->conn;
5061 
5062 	BT_DBG("move_state %d", chan->move_state);
5063 
5064 	switch (chan->move_state) {
5065 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5066 		/* Move confirm will be sent after a success
5067 		 * response is received
5068 		 */
5069 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5070 		break;
5071 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
5072 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5073 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5074 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5075 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5076 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5077 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5078 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5079 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5080 		}
5081 		break;
5082 	default:
5083 		/* Move was not in expected state, free the channel */
5084 		__release_logical_link(chan);
5085 
5086 		chan->move_state = L2CAP_MOVE_STABLE;
5087 	}
5088 }
5089 
5090 /* Call with chan locked */
5091 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5092 		       u8 status)
5093 {
5094 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5095 
5096 	if (status) {
5097 		l2cap_logical_fail(chan);
5098 		__release_logical_link(chan);
5099 		return;
5100 	}
5101 
5102 	if (chan->state != BT_CONNECTED) {
5103 		/* Ignore logical link if channel is on BR/EDR */
5104 		if (chan->local_amp_id != AMP_ID_BREDR)
5105 			l2cap_logical_finish_create(chan, hchan);
5106 	} else {
5107 		l2cap_logical_finish_move(chan, hchan);
5108 	}
5109 }
5110 
5111 void l2cap_move_start(struct l2cap_chan *chan)
5112 {
5113 	BT_DBG("chan %p", chan);
5114 
5115 	if (chan->local_amp_id == AMP_ID_BREDR) {
5116 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5117 			return;
5118 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5119 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5120 		/* Placeholder - start physical link setup */
5121 	} else {
5122 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5123 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5124 		chan->move_id = 0;
5125 		l2cap_move_setup(chan);
5126 		l2cap_send_move_chan_req(chan, 0);
5127 	}
5128 }
5129 
5130 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5131 			    u8 local_amp_id, u8 remote_amp_id)
5132 {
5133 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5134 	       local_amp_id, remote_amp_id);
5135 
5136 	chan->fcs = L2CAP_FCS_NONE;
5137 
5138 	/* Outgoing channel on AMP */
5139 	if (chan->state == BT_CONNECT) {
5140 		if (result == L2CAP_CR_SUCCESS) {
5141 			chan->local_amp_id = local_amp_id;
5142 			l2cap_send_create_chan_req(chan, remote_amp_id);
5143 		} else {
5144 			/* Revert to BR/EDR connect */
5145 			l2cap_send_conn_req(chan);
5146 		}
5147 
5148 		return;
5149 	}
5150 
5151 	/* Incoming channel on AMP */
5152 	if (__l2cap_no_conn_pending(chan)) {
5153 		struct l2cap_conn_rsp rsp;
5154 		char buf[128];
5155 		rsp.scid = cpu_to_le16(chan->dcid);
5156 		rsp.dcid = cpu_to_le16(chan->scid);
5157 
5158 		if (result == L2CAP_CR_SUCCESS) {
5159 			/* Send successful response */
5160 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5161 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5162 		} else {
5163 			/* Send negative response */
5164 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5165 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5166 		}
5167 
5168 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
5169 			       sizeof(rsp), &rsp);
5170 
5171 		if (result == L2CAP_CR_SUCCESS) {
5172 			l2cap_state_change(chan, BT_CONFIG);
5173 			set_bit(CONF_REQ_SENT, &chan->conf_state);
5174 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5175 				       L2CAP_CONF_REQ,
5176 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5177 			chan->num_conf_req++;
5178 		}
5179 	}
5180 }
5181 
5182 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5183 				   u8 remote_amp_id)
5184 {
5185 	l2cap_move_setup(chan);
5186 	chan->move_id = local_amp_id;
5187 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5188 
5189 	l2cap_send_move_chan_req(chan, remote_amp_id);
5190 }
5191 
5192 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5193 {
5194 	struct hci_chan *hchan = NULL;
5195 
5196 	/* Placeholder - get hci_chan for logical link */
5197 
5198 	if (hchan) {
5199 		if (hchan->state == BT_CONNECTED) {
5200 			/* Logical link is ready to go */
5201 			chan->hs_hcon = hchan->conn;
5202 			chan->hs_hcon->l2cap_data = chan->conn;
5203 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5204 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5205 
5206 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5207 		} else {
5208 			/* Wait for logical link to be ready */
5209 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5210 		}
5211 	} else {
5212 		/* Logical link not available */
5213 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5214 	}
5215 }
5216 
5217 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5218 {
5219 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5220 		u8 rsp_result;
5221 		if (result == -EINVAL)
5222 			rsp_result = L2CAP_MR_BAD_ID;
5223 		else
5224 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5225 
5226 		l2cap_send_move_chan_rsp(chan, rsp_result);
5227 	}
5228 
5229 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5230 	chan->move_state = L2CAP_MOVE_STABLE;
5231 
5232 	/* Restart data transmission */
5233 	l2cap_ertm_send(chan);
5234 }
5235 
5236 /* Invoke with locked chan */
5237 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5238 {
5239 	u8 local_amp_id = chan->local_amp_id;
5240 	u8 remote_amp_id = chan->remote_amp_id;
5241 
5242 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5243 	       chan, result, local_amp_id, remote_amp_id);
5244 
5245 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5246 		return;
5247 
5248 	if (chan->state != BT_CONNECTED) {
5249 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5250 	} else if (result != L2CAP_MR_SUCCESS) {
5251 		l2cap_do_move_cancel(chan, result);
5252 	} else {
5253 		switch (chan->move_role) {
5254 		case L2CAP_MOVE_ROLE_INITIATOR:
5255 			l2cap_do_move_initiate(chan, local_amp_id,
5256 					       remote_amp_id);
5257 			break;
5258 		case L2CAP_MOVE_ROLE_RESPONDER:
5259 			l2cap_do_move_respond(chan, result);
5260 			break;
5261 		default:
5262 			l2cap_do_move_cancel(chan, result);
5263 			break;
5264 		}
5265 	}
5266 }
5267 
5268 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5269 					 struct l2cap_cmd_hdr *cmd,
5270 					 u16 cmd_len, void *data)
5271 {
5272 	struct l2cap_move_chan_req *req = data;
5273 	struct l2cap_move_chan_rsp rsp;
5274 	struct l2cap_chan *chan;
5275 	u16 icid = 0;
5276 	u16 result = L2CAP_MR_NOT_ALLOWED;
5277 
5278 	if (cmd_len != sizeof(*req))
5279 		return -EPROTO;
5280 
5281 	icid = le16_to_cpu(req->icid);
5282 
5283 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5284 
5285 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5286 		return -EINVAL;
5287 
5288 	chan = l2cap_get_chan_by_dcid(conn, icid);
5289 	if (!chan) {
5290 		rsp.icid = cpu_to_le16(icid);
5291 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5292 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5293 			       sizeof(rsp), &rsp);
5294 		return 0;
5295 	}
5296 
5297 	chan->ident = cmd->ident;
5298 
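	/* Only dynamically allocated ERTM or streaming channels that are not
	 * locked to BR/EDR can be moved to another controller.
	 */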
5299 	if (chan->scid < L2CAP_CID_DYN_START ||
5300 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5301 	    (chan->mode != L2CAP_MODE_ERTM &&
5302 	     chan->mode != L2CAP_MODE_STREAMING)) {
5303 		result = L2CAP_MR_NOT_ALLOWED;
5304 		goto send_move_response;
5305 	}
5306 
5307 	if (chan->local_amp_id == req->dest_amp_id) {
5308 		result = L2CAP_MR_SAME_ID;
5309 		goto send_move_response;
5310 	}
5311 
5312 	if (req->dest_amp_id != AMP_ID_BREDR) {
5313 		struct hci_dev *hdev;
5314 		hdev = hci_dev_get(req->dest_amp_id);
5315 		if (!hdev || hdev->dev_type != HCI_AMP ||
5316 		    !test_bit(HCI_UP, &hdev->flags)) {
5317 			if (hdev)
5318 				hci_dev_put(hdev);
5319 
5320 			result = L2CAP_MR_BAD_ID;
5321 			goto send_move_response;
5322 		}
5323 		hci_dev_put(hdev);
5324 	}
5325 
5326 	/* Detect a move collision.  Only send a collision response
5327 	 * if this side has "lost", otherwise proceed with the move.
5328 	 * The winner has the larger bd_addr.
5329 	 */
5330 	if ((__chan_is_moving(chan) ||
5331 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5332 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5333 		result = L2CAP_MR_COLLISION;
5334 		goto send_move_response;
5335 	}
5336 
5337 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5338 	l2cap_move_setup(chan);
5339 	chan->move_id = req->dest_amp_id;
5340 
5341 	if (req->dest_amp_id == AMP_ID_BREDR) {
5342 		/* Moving to BR/EDR */
5343 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5344 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5345 			result = L2CAP_MR_PEND;
5346 		} else {
5347 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5348 			result = L2CAP_MR_SUCCESS;
5349 		}
5350 	} else {
5351 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5352 		/* Placeholder - uncomment when amp functions are available */
5353 		/*amp_accept_physical(chan, req->dest_amp_id);*/
5354 		result = L2CAP_MR_PEND;
5355 	}
5356 
5357 send_move_response:
5358 	l2cap_send_move_chan_rsp(chan, result);
5359 
5360 	l2cap_chan_unlock(chan);
5361 	l2cap_chan_put(chan);
5362 
5363 	return 0;
5364 }
5365 
5366 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5367 {
5368 	struct l2cap_chan *chan;
5369 	struct hci_chan *hchan = NULL;
5370 
5371 	chan = l2cap_get_chan_by_scid(conn, icid);
5372 	if (!chan) {
5373 		l2cap_send_move_chan_cfm_icid(conn, icid);
5374 		return;
5375 	}
5376 
5377 	__clear_chan_timer(chan);
5378 	if (result == L2CAP_MR_PEND)
5379 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5380 
5381 	switch (chan->move_state) {
5382 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5383 		/* Move confirm will be sent when logical link
5384 		 * is complete.
5385 		 */
5386 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5387 		break;
5388 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5389 		if (result == L2CAP_MR_PEND) {
5390 			break;
5391 		} else if (test_bit(CONN_LOCAL_BUSY,
5392 				    &chan->conn_state)) {
5393 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5394 		} else {
5395 			/* Logical link is up or moving to BR/EDR,
5396 			 * proceed with move
5397 			 */
5398 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5399 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5400 		}
5401 		break;
5402 	case L2CAP_MOVE_WAIT_RSP:
5403 		/* Moving to AMP */
5404 		if (result == L2CAP_MR_SUCCESS) {
5405 			/* Remote is ready, send confirm immediately
5406 			 * after logical link is ready
5407 			 */
5408 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5409 		} else {
5410 			/* Both logical link and move success
5411 			 * are required to confirm
5412 			 */
5413 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5414 		}
5415 
5416 		/* Placeholder - get hci_chan for logical link */
5417 		if (!hchan) {
5418 			/* Logical link not available */
5419 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5420 			break;
5421 		}
5422 
5423 		/* If the logical link is not yet connected, do not
5424 		 * send confirmation.
5425 		 */
5426 		if (hchan->state != BT_CONNECTED)
5427 			break;
5428 
5429 		/* Logical link is already ready to go */
5430 
5431 		chan->hs_hcon = hchan->conn;
5432 		chan->hs_hcon->l2cap_data = chan->conn;
5433 
5434 		if (result == L2CAP_MR_SUCCESS) {
5435 			/* Can confirm now */
5436 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5437 		} else {
5438 			/* Now only need move success
5439 			 * to confirm
5440 			 */
5441 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5442 		}
5443 
5444 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5445 		break;
5446 	default:
5447 		/* Any other amp move state means the move failed. */
5448 		chan->move_id = chan->local_amp_id;
5449 		l2cap_move_done(chan);
5450 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5451 	}
5452 
5453 	l2cap_chan_unlock(chan);
5454 	l2cap_chan_put(chan);
5455 }
5456 
5457 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5458 			    u16 result)
5459 {
5460 	struct l2cap_chan *chan;
5461 
5462 	chan = l2cap_get_chan_by_ident(conn, ident);
5463 	if (!chan) {
5464 		/* Could not locate channel, icid is best guess */
5465 		l2cap_send_move_chan_cfm_icid(conn, icid);
5466 		return;
5467 	}
5468 
5469 	__clear_chan_timer(chan);
5470 
5471 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5472 		if (result == L2CAP_MR_COLLISION) {
5473 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5474 		} else {
5475 			/* Cleanup - cancel move */
5476 			chan->move_id = chan->local_amp_id;
5477 			l2cap_move_done(chan);
5478 		}
5479 	}
5480 
5481 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5482 
5483 	l2cap_chan_unlock(chan);
5484 	l2cap_chan_put(chan);
5485 }
5486 
5487 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5488 				  struct l2cap_cmd_hdr *cmd,
5489 				  u16 cmd_len, void *data)
5490 {
5491 	struct l2cap_move_chan_rsp *rsp = data;
5492 	u16 icid, result;
5493 
5494 	if (cmd_len != sizeof(*rsp))
5495 		return -EPROTO;
5496 
5497 	icid = le16_to_cpu(rsp->icid);
5498 	result = le16_to_cpu(rsp->result);
5499 
5500 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5501 
5502 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5503 		l2cap_move_continue(conn, icid, result);
5504 	else
5505 		l2cap_move_fail(conn, cmd->ident, icid, result);
5506 
5507 	return 0;
5508 }
5509 
5510 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5511 				      struct l2cap_cmd_hdr *cmd,
5512 				      u16 cmd_len, void *data)
5513 {
5514 	struct l2cap_move_chan_cfm *cfm = data;
5515 	struct l2cap_chan *chan;
5516 	u16 icid, result;
5517 
5518 	if (cmd_len != sizeof(*cfm))
5519 		return -EPROTO;
5520 
5521 	icid = le16_to_cpu(cfm->icid);
5522 	result = le16_to_cpu(cfm->result);
5523 
5524 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5525 
5526 	chan = l2cap_get_chan_by_dcid(conn, icid);
5527 	if (!chan) {
5528 		/* Spec requires a response even if the icid was not found */
5529 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5530 		return 0;
5531 	}
5532 
5533 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5534 		if (result == L2CAP_MC_CONFIRMED) {
5535 			chan->local_amp_id = chan->move_id;
5536 			if (chan->local_amp_id == AMP_ID_BREDR)
5537 				__release_logical_link(chan);
5538 		} else {
5539 			chan->move_id = chan->local_amp_id;
5540 		}
5541 
5542 		l2cap_move_done(chan);
5543 	}
5544 
5545 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5546 
5547 	l2cap_chan_unlock(chan);
5548 	l2cap_chan_put(chan);
5549 
5550 	return 0;
5551 }
5552 
5553 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5554 						 struct l2cap_cmd_hdr *cmd,
5555 						 u16 cmd_len, void *data)
5556 {
5557 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5558 	struct l2cap_chan *chan;
5559 	u16 icid;
5560 
5561 	if (cmd_len != sizeof(*rsp))
5562 		return -EPROTO;
5563 
5564 	icid = le16_to_cpu(rsp->icid);
5565 
5566 	BT_DBG("icid 0x%4.4x", icid);
5567 
5568 	chan = l2cap_get_chan_by_scid(conn, icid);
5569 	if (!chan)
5570 		return 0;
5571 
5572 	__clear_chan_timer(chan);
5573 
5574 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5575 		chan->local_amp_id = chan->move_id;
5576 
5577 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5578 			__release_logical_link(chan);
5579 
5580 		l2cap_move_done(chan);
5581 	}
5582 
5583 	l2cap_chan_unlock(chan);
5584 	l2cap_chan_put(chan);
5585 
5586 	return 0;
5587 }
5588 
5589 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5590 					      struct l2cap_cmd_hdr *cmd,
5591 					      u16 cmd_len, u8 *data)
5592 {
5593 	struct hci_conn *hcon = conn->hcon;
5594 	struct l2cap_conn_param_update_req *req;
5595 	struct l2cap_conn_param_update_rsp rsp;
5596 	u16 min, max, latency, to_multiplier;
5597 	int err;
5598 
5599 	if (hcon->role != HCI_ROLE_MASTER)
5600 		return -EINVAL;
5601 
5602 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5603 		return -EPROTO;
5604 
5605 	req = (struct l2cap_conn_param_update_req *) data;
5606 	min		= __le16_to_cpu(req->min);
5607 	max		= __le16_to_cpu(req->max);
5608 	latency		= __le16_to_cpu(req->latency);
5609 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5610 
5611 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5612 	       min, max, latency, to_multiplier);
5613 
5614 	memset(&rsp, 0, sizeof(rsp));
5615 
5616 	if (max > hcon->le_conn_max_interval) {
5617 		BT_DBG("requested connection interval exceeds current bounds.");
5618 		err = -EINVAL;
5619 	} else {
5620 		err = hci_check_conn_params(min, max, latency, to_multiplier);
5621 	}
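	/* Illustrative note (a hedged sketch, not normative text): the
	 * request fields use the usual LE units - min/max interval in
	 * 1.25 ms steps, latency in connection events, timeout in 10 ms
	 * steps.  For example a request of
	 *
	 *	min = 0x0018 (30 ms), max = 0x0028 (50 ms),
	 *	latency = 4, to_multiplier = 0x01f4 (5 s)
	 *
	 * passes hci_check_conn_params() as long as max does not exceed
	 * hcon->le_conn_max_interval checked just above.
	 */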
5622 
5623 	if (err)
5624 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5625 	else
5626 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5627 
5628 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5629 		       sizeof(rsp), &rsp);
5630 
5631 	if (!err) {
5632 		u8 store_hint;
5633 
5634 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5635 						to_multiplier);
5636 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5637 				    store_hint, min, max, latency,
5638 				    to_multiplier);
5639 
5640 	}
5641 
5642 	return 0;
5643 }
5644 
5645 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5646 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5647 				u8 *data)
5648 {
5649 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5650 	struct hci_conn *hcon = conn->hcon;
5651 	u16 dcid, mtu, mps, credits, result;
5652 	struct l2cap_chan *chan;
5653 	int err, sec_level;
5654 
5655 	if (cmd_len < sizeof(*rsp))
5656 		return -EPROTO;
5657 
5658 	dcid    = __le16_to_cpu(rsp->dcid);
5659 	mtu     = __le16_to_cpu(rsp->mtu);
5660 	mps     = __le16_to_cpu(rsp->mps);
5661 	credits = __le16_to_cpu(rsp->credits);
5662 	result  = __le16_to_cpu(rsp->result);
5663 
5664 	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5665 					   dcid < L2CAP_CID_DYN_START ||
5666 					   dcid > L2CAP_CID_LE_DYN_END))
5667 		return -EPROTO;
5668 
5669 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5670 	       dcid, mtu, mps, credits, result);
5671 
5672 	mutex_lock(&conn->chan_lock);
5673 
5674 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5675 	if (!chan) {
5676 		err = -EBADSLT;
5677 		goto unlock;
5678 	}
5679 
5680 	err = 0;
5681 
5682 	l2cap_chan_lock(chan);
5683 
5684 	switch (result) {
5685 	case L2CAP_CR_LE_SUCCESS:
5686 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5687 			err = -EBADSLT;
5688 			break;
5689 		}
5690 
5691 		chan->ident = 0;
5692 		chan->dcid = dcid;
5693 		chan->omtu = mtu;
5694 		chan->remote_mps = mps;
5695 		chan->tx_credits = credits;
5696 		l2cap_chan_ready(chan);
5697 		break;
5698 
5699 	case L2CAP_CR_LE_AUTHENTICATION:
5700 	case L2CAP_CR_LE_ENCRYPTION:
5701 		/* If we already have MITM protection we can't do
5702 		 * anything.
5703 		 */
5704 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5705 			l2cap_chan_del(chan, ECONNREFUSED);
5706 			break;
5707 		}
5708 
5709 		sec_level = hcon->sec_level + 1;
5710 		if (chan->sec_level < sec_level)
5711 			chan->sec_level = sec_level;
5712 
5713 		/* We'll need to send a new Connect Request */
5714 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5715 
5716 		smp_conn_security(hcon, chan->sec_level);
5717 		break;
5718 
5719 	default:
5720 		l2cap_chan_del(chan, ECONNREFUSED);
5721 		break;
5722 	}
5723 
5724 	l2cap_chan_unlock(chan);
5725 
5726 unlock:
5727 	mutex_unlock(&conn->chan_lock);
5728 
5729 	return err;
5730 }
5731 
5732 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5733 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5734 				      u8 *data)
5735 {
5736 	int err = 0;
5737 
5738 	switch (cmd->code) {
5739 	case L2CAP_COMMAND_REJ:
5740 		l2cap_command_rej(conn, cmd, cmd_len, data);
5741 		break;
5742 
5743 	case L2CAP_CONN_REQ:
5744 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5745 		break;
5746 
5747 	case L2CAP_CONN_RSP:
5748 	case L2CAP_CREATE_CHAN_RSP:
5749 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5750 		break;
5751 
5752 	case L2CAP_CONF_REQ:
5753 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5754 		break;
5755 
5756 	case L2CAP_CONF_RSP:
5757 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5758 		break;
5759 
5760 	case L2CAP_DISCONN_REQ:
5761 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5762 		break;
5763 
5764 	case L2CAP_DISCONN_RSP:
5765 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5766 		break;
5767 
5768 	case L2CAP_ECHO_REQ:
5769 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5770 		break;
5771 
5772 	case L2CAP_ECHO_RSP:
5773 		break;
5774 
5775 	case L2CAP_INFO_REQ:
5776 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5777 		break;
5778 
5779 	case L2CAP_INFO_RSP:
5780 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5781 		break;
5782 
5783 	case L2CAP_CREATE_CHAN_REQ:
5784 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5785 		break;
5786 
5787 	case L2CAP_MOVE_CHAN_REQ:
5788 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5789 		break;
5790 
5791 	case L2CAP_MOVE_CHAN_RSP:
5792 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5793 		break;
5794 
5795 	case L2CAP_MOVE_CHAN_CFM:
5796 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5797 		break;
5798 
5799 	case L2CAP_MOVE_CHAN_CFM_RSP:
5800 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5801 		break;
5802 
5803 	default:
5804 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5805 		err = -EINVAL;
5806 		break;
5807 	}
5808 
5809 	return err;
5810 }
5811 
5812 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5813 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5814 				u8 *data)
5815 {
5816 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5817 	struct l2cap_le_conn_rsp rsp;
5818 	struct l2cap_chan *chan, *pchan;
5819 	u16 dcid, scid, credits, mtu, mps;
5820 	__le16 psm;
5821 	u8 result;
5822 
5823 	if (cmd_len != sizeof(*req))
5824 		return -EPROTO;
5825 
5826 	scid = __le16_to_cpu(req->scid);
5827 	mtu  = __le16_to_cpu(req->mtu);
5828 	mps  = __le16_to_cpu(req->mps);
5829 	psm  = req->psm;
5830 	dcid = 0;
5831 	credits = 0;
5832 
5833 	if (mtu < 23 || mps < 23)
5834 		return -EPROTO;
5835 
5836 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5837 	       scid, mtu, mps);
5838 
5839 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5840 	 * page 1059:
5841 	 *
5842 	 * Valid range: 0x0001-0x00ff
5843 	 *
5844 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5845 	 */
5846 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5847 		result = L2CAP_CR_LE_BAD_PSM;
5848 		chan = NULL;
5849 		goto response;
5850 	}
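	/* Example of the SPSM check above (illustrative only): a request
	 * with psm = 0x0080 falls in the dynamic LE PSM range and is looked
	 * up against listening sockets below, while psm = 0x0000 or
	 * psm = 0x0100 (> L2CAP_PSM_LE_DYN_END) is rejected right here with
	 * L2CAP_CR_LE_BAD_PSM and no channel is created.
	 */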
5851 
5852 	/* Check if we have socket listening on psm */
5853 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5854 					 &conn->hcon->dst, LE_LINK);
5855 	if (!pchan) {
5856 		result = L2CAP_CR_LE_BAD_PSM;
5857 		chan = NULL;
5858 		goto response;
5859 	}
5860 
5861 	mutex_lock(&conn->chan_lock);
5862 	l2cap_chan_lock(pchan);
5863 
5864 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5865 				     SMP_ALLOW_STK)) {
5866 		result = L2CAP_CR_LE_AUTHENTICATION;
5867 		chan = NULL;
5868 		goto response_unlock;
5869 	}
5870 
5871 	/* Check for valid dynamic CID range */
5872 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5873 		result = L2CAP_CR_LE_INVALID_SCID;
5874 		chan = NULL;
5875 		goto response_unlock;
5876 	}
5877 
5878 	/* Check if we already have channel with that dcid */
5879 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5880 		result = L2CAP_CR_LE_SCID_IN_USE;
5881 		chan = NULL;
5882 		goto response_unlock;
5883 	}
5884 
5885 	chan = pchan->ops->new_connection(pchan);
5886 	if (!chan) {
5887 		result = L2CAP_CR_LE_NO_MEM;
5888 		goto response_unlock;
5889 	}
5890 
5891 	bacpy(&chan->src, &conn->hcon->src);
5892 	bacpy(&chan->dst, &conn->hcon->dst);
5893 	chan->src_type = bdaddr_src_type(conn->hcon);
5894 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5895 	chan->psm  = psm;
5896 	chan->dcid = scid;
5897 	chan->omtu = mtu;
5898 	chan->remote_mps = mps;
5899 
5900 	__l2cap_chan_add(conn, chan);
5901 
5902 	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5903 
5904 	dcid = chan->scid;
5905 	credits = chan->rx_credits;
5906 
5907 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5908 
5909 	chan->ident = cmd->ident;
5910 
5911 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5912 		l2cap_state_change(chan, BT_CONNECT2);
5913 		/* The following result value is actually not defined
5914 		 * for LE CoC but we use it to let the function know
5915 		 * that it should bail out after doing its cleanup
5916 		 * instead of sending a response.
5917 		 */
5918 		result = L2CAP_CR_PEND;
5919 		chan->ops->defer(chan);
5920 	} else {
5921 		l2cap_chan_ready(chan);
5922 		result = L2CAP_CR_LE_SUCCESS;
5923 	}
5924 
5925 response_unlock:
5926 	l2cap_chan_unlock(pchan);
5927 	mutex_unlock(&conn->chan_lock);
5928 	l2cap_chan_put(pchan);
5929 
5930 	if (result == L2CAP_CR_PEND)
5931 		return 0;
5932 
5933 response:
5934 	if (chan) {
5935 		rsp.mtu = cpu_to_le16(chan->imtu);
5936 		rsp.mps = cpu_to_le16(chan->mps);
5937 	} else {
5938 		rsp.mtu = 0;
5939 		rsp.mps = 0;
5940 	}
5941 
5942 	rsp.dcid    = cpu_to_le16(dcid);
5943 	rsp.credits = cpu_to_le16(credits);
5944 	rsp.result  = cpu_to_le16(result);
5945 
5946 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5947 
5948 	return 0;
5949 }
5950 
5951 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5952 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5953 				   u8 *data)
5954 {
5955 	struct l2cap_le_credits *pkt;
5956 	struct l2cap_chan *chan;
5957 	u16 cid, credits, max_credits;
5958 
5959 	if (cmd_len != sizeof(*pkt))
5960 		return -EPROTO;
5961 
5962 	pkt = (struct l2cap_le_credits *) data;
5963 	cid	= __le16_to_cpu(pkt->cid);
5964 	credits	= __le16_to_cpu(pkt->credits);
5965 
5966 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5967 
5968 	chan = l2cap_get_chan_by_dcid(conn, cid);
5969 	if (!chan)
5970 		return -EBADSLT;
5971 
5972 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5973 	if (credits > max_credits) {
5974 		BT_ERR("LE credits overflow");
5975 		l2cap_send_disconn_req(chan, ECONNRESET);
5976 
5977 		/* Return 0 so that we don't trigger an unnecessary
5978 		 * command reject packet.
5979 		 */
5980 		goto unlock;
5981 	}
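	/* Worked example of the check above (illustrative): with
	 * tx_credits = 64000 the peer may grant at most
	 * 65535 - 64000 = 1535 more credits; an L2CAP_LE_CREDITS packet
	 * granting 2000 would push the total past LE_FLOWCTL_MAX_CREDITS,
	 * so the channel is torn down instead of wrapping the counter.
	 * Each credit allows one K-frame of at most MPS bytes to be sent.
	 */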
5982 
5983 	chan->tx_credits += credits;
5984 
5985 	/* Resume sending */
5986 	l2cap_le_flowctl_send(chan);
5987 
5988 	if (chan->tx_credits)
5989 		chan->ops->resume(chan);
5990 
5991 unlock:
5992 	l2cap_chan_unlock(chan);
5993 	l2cap_chan_put(chan);
5994 
5995 	return 0;
5996 }
5997 
5998 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5999 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6000 				       u8 *data)
6001 {
6002 	struct l2cap_ecred_conn_req *req = (void *) data;
6003 	struct {
6004 		struct l2cap_ecred_conn_rsp rsp;
6005 		__le16 dcid[L2CAP_ECRED_MAX_CID];
6006 	} __packed pdu;
6007 	struct l2cap_chan *chan, *pchan;
6008 	u16 mtu, mps;
6009 	__le16 psm;
6010 	u8 result, len = 0;
6011 	int i, num_scid;
6012 	bool defer = false;
6013 
6014 	if (!enable_ecred)
6015 		return -EINVAL;
6016 
6017 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6018 		result = L2CAP_CR_LE_INVALID_PARAMS;
6019 		goto response;
6020 	}
6021 
6022 	cmd_len -= sizeof(*req);
6023 	num_scid = cmd_len / sizeof(u16);
6024 
6025 	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
6026 		result = L2CAP_CR_LE_INVALID_PARAMS;
6027 		goto response;
6028 	}
6029 
6030 	mtu  = __le16_to_cpu(req->mtu);
6031 	mps  = __le16_to_cpu(req->mps);
6032 
6033 	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
6034 		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
6035 		goto response;
6036 	}
6037 
6038 	psm  = req->psm;
6039 
6040 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
6041 	 * page 1059:
6042 	 *
6043 	 * Valid range: 0x0001-0x00ff
6044 	 *
6045 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
6046 	 */
6047 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
6048 		result = L2CAP_CR_LE_BAD_PSM;
6049 		goto response;
6050 	}
6051 
6052 	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
6053 
6054 	memset(&pdu, 0, sizeof(pdu));
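	/* The response assembled in 'pdu' is the fixed part (mtu, mps,
	 * initial credits, result) followed by one allocated DCID per SCID
	 * in the request.  Sketch for a two-channel request (sizes per the
	 * structures above):
	 *
	 *	| mtu | mps | credits | result | dcid[0] | dcid[1] |
	 *
	 * L2CAP_ECRED_MAX_CID bounds how many SCIDs one request may carry.
	 */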
6055 
6056 	/* Check if we have socket listening on psm */
6057 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
6058 					 &conn->hcon->dst, LE_LINK);
6059 	if (!pchan) {
6060 		result = L2CAP_CR_LE_BAD_PSM;
6061 		goto response;
6062 	}
6063 
6064 	mutex_lock(&conn->chan_lock);
6065 	l2cap_chan_lock(pchan);
6066 
6067 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
6068 				     SMP_ALLOW_STK)) {
6069 		result = L2CAP_CR_LE_AUTHENTICATION;
6070 		goto unlock;
6071 	}
6072 
6073 	result = L2CAP_CR_LE_SUCCESS;
6074 
6075 	for (i = 0; i < num_scid; i++) {
6076 		u16 scid = __le16_to_cpu(req->scid[i]);
6077 
6078 		BT_DBG("scid[%d] 0x%4.4x", i, scid);
6079 
6080 		pdu.dcid[i] = 0x0000;
6081 		len += sizeof(*pdu.dcid);
6082 
6083 		/* Check for valid dynamic CID range */
6084 		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
6085 			result = L2CAP_CR_LE_INVALID_SCID;
6086 			continue;
6087 		}
6088 
6089 		/* Check if we already have channel with that dcid */
6090 		if (__l2cap_get_chan_by_dcid(conn, scid)) {
6091 			result = L2CAP_CR_LE_SCID_IN_USE;
6092 			continue;
6093 		}
6094 
6095 		chan = pchan->ops->new_connection(pchan);
6096 		if (!chan) {
6097 			result = L2CAP_CR_LE_NO_MEM;
6098 			continue;
6099 		}
6100 
6101 		bacpy(&chan->src, &conn->hcon->src);
6102 		bacpy(&chan->dst, &conn->hcon->dst);
6103 		chan->src_type = bdaddr_src_type(conn->hcon);
6104 		chan->dst_type = bdaddr_dst_type(conn->hcon);
6105 		chan->psm  = psm;
6106 		chan->dcid = scid;
6107 		chan->omtu = mtu;
6108 		chan->remote_mps = mps;
6109 
6110 		__l2cap_chan_add(conn, chan);
6111 
6112 		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
6113 
6114 		/* Init response */
6115 		if (!pdu.rsp.credits) {
6116 			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6117 			pdu.rsp.mps = cpu_to_le16(chan->mps);
6118 			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6119 		}
6120 
6121 		pdu.dcid[i] = cpu_to_le16(chan->scid);
6122 
6123 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6124 
6125 		chan->ident = cmd->ident;
6126 		chan->mode = L2CAP_MODE_EXT_FLOWCTL;
6127 
6128 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6129 			l2cap_state_change(chan, BT_CONNECT2);
6130 			defer = true;
6131 			chan->ops->defer(chan);
6132 		} else {
6133 			l2cap_chan_ready(chan);
6134 		}
6135 	}
6136 
6137 unlock:
6138 	l2cap_chan_unlock(pchan);
6139 	mutex_unlock(&conn->chan_lock);
6140 	l2cap_chan_put(pchan);
6141 
6142 response:
6143 	pdu.rsp.result = cpu_to_le16(result);
6144 
6145 	if (defer)
6146 		return 0;
6147 
6148 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6149 		       sizeof(pdu.rsp) + len, &pdu);
6150 
6151 	return 0;
6152 }
6153 
6154 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6155 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6156 				       u8 *data)
6157 {
6158 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6159 	struct hci_conn *hcon = conn->hcon;
6160 	u16 mtu, mps, credits, result;
6161 	struct l2cap_chan *chan, *tmp;
6162 	int err = 0, sec_level;
6163 	int i = 0;
6164 
6165 	if (cmd_len < sizeof(*rsp))
6166 		return -EPROTO;
6167 
6168 	mtu     = __le16_to_cpu(rsp->mtu);
6169 	mps     = __le16_to_cpu(rsp->mps);
6170 	credits = __le16_to_cpu(rsp->credits);
6171 	result  = __le16_to_cpu(rsp->result);
6172 
6173 	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6174 	       result);
6175 
6176 	mutex_lock(&conn->chan_lock);
6177 
6178 	cmd_len -= sizeof(*rsp);
6179 
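	/* What remains of cmd_len is the DCID array of the response, one
	 * __le16 per channel requested under this ident.  The loop below
	 * walks our pending channels in list order and consumes one DCID
	 * for each; a channel with no DCID left is treated as refused
	 * (illustrative summary of the code that follows).
	 */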
6180 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6181 		u16 dcid;
6182 
6183 		if (chan->ident != cmd->ident ||
6184 		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6185 		    chan->state == BT_CONNECTED)
6186 			continue;
6187 
6188 		l2cap_chan_lock(chan);
6189 
6190 		/* Check that there is a dcid for each pending channel */
6191 		if (cmd_len < sizeof(dcid)) {
6192 			l2cap_chan_del(chan, ECONNREFUSED);
6193 			l2cap_chan_unlock(chan);
6194 			continue;
6195 		}
6196 
6197 		dcid = __le16_to_cpu(rsp->dcid[i++]);
6198 		cmd_len -= sizeof(u16);
6199 
6200 		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6201 
6202 		/* Check if dcid is already in use */
6203 		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6204 			/* If a device receives a
6205 			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6206 			 * already-assigned Destination CID, then both the
6207 			 * original channel and the new channel shall be
6208 			 * immediately discarded and not used.
6209 			 */
6210 			l2cap_chan_del(chan, ECONNREFUSED);
6211 			l2cap_chan_unlock(chan);
6212 			chan = __l2cap_get_chan_by_dcid(conn, dcid);
6213 			l2cap_chan_lock(chan);
6214 			l2cap_chan_del(chan, ECONNRESET);
6215 			l2cap_chan_unlock(chan);
6216 			continue;
6217 		}
6218 
6219 		switch (result) {
6220 		case L2CAP_CR_LE_AUTHENTICATION:
6221 		case L2CAP_CR_LE_ENCRYPTION:
6222 			/* If we already have MITM protection we can't do
6223 			 * anything.
6224 			 */
6225 			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6226 				l2cap_chan_del(chan, ECONNREFUSED);
6227 				break;
6228 			}
6229 
6230 			sec_level = hcon->sec_level + 1;
6231 			if (chan->sec_level < sec_level)
6232 				chan->sec_level = sec_level;
6233 
6234 			/* We'll need to send a new Connect Request */
6235 			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6236 
6237 			smp_conn_security(hcon, chan->sec_level);
6238 			break;
6239 
6240 		case L2CAP_CR_LE_BAD_PSM:
6241 			l2cap_chan_del(chan, ECONNREFUSED);
6242 			break;
6243 
6244 		default:
6245 			/* If dcid was not set it means the channel was refused */
6246 			if (!dcid) {
6247 				l2cap_chan_del(chan, ECONNREFUSED);
6248 				break;
6249 			}
6250 
6251 			chan->ident = 0;
6252 			chan->dcid = dcid;
6253 			chan->omtu = mtu;
6254 			chan->remote_mps = mps;
6255 			chan->tx_credits = credits;
6256 			l2cap_chan_ready(chan);
6257 			break;
6258 		}
6259 
6260 		l2cap_chan_unlock(chan);
6261 	}
6262 
6263 	mutex_unlock(&conn->chan_lock);
6264 
6265 	return err;
6266 }
6267 
6268 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6269 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6270 					 u8 *data)
6271 {
6272 	struct l2cap_ecred_reconf_req *req = (void *) data;
6273 	struct l2cap_ecred_reconf_rsp rsp;
6274 	u16 mtu, mps, result;
6275 	struct l2cap_chan *chan;
6276 	int i, num_scid;
6277 
6278 	if (!enable_ecred)
6279 		return -EINVAL;
6280 
6281 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6282 		result = L2CAP_CR_LE_INVALID_PARAMS;
6283 		goto respond;
6284 	}
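	/* A valid reconfigure request is therefore sizeof(*req) bytes of
	 * mtu/mps plus a whole number of 2-byte SCIDs, e.g. cmd_len of 6
	 * or 8 for one or two channels; cmd_len = 7 fails the check above
	 * (illustrative example only).
	 */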
6285 
6286 	mtu = __le16_to_cpu(req->mtu);
6287 	mps = __le16_to_cpu(req->mps);
6288 
6289 	BT_DBG("mtu %u mps %u", mtu, mps);
6290 
6291 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6292 		result = L2CAP_RECONF_INVALID_MTU;
6293 		goto respond;
6294 	}
6295 
6296 	if (mps < L2CAP_ECRED_MIN_MPS) {
6297 		result = L2CAP_RECONF_INVALID_MPS;
6298 		goto respond;
6299 	}
6300 
6301 	cmd_len -= sizeof(*req);
6302 	num_scid = cmd_len / sizeof(u16);
6303 	result = L2CAP_RECONF_SUCCESS;
6304 
6305 	for (i = 0; i < num_scid; i++) {
6306 		u16 scid;
6307 
6308 		scid = __le16_to_cpu(req->scid[i]);
6309 		if (!scid)
6310 			return -EPROTO;
6311 
6312 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6313 		if (!chan)
6314 			continue;
6315 
6316 		/* If the MTU value is decreased for any of the included
6317 		 * channels, then the receiver shall disconnect all
6318 		 * included channels.
6319 		 */
6320 		if (chan->omtu > mtu) {
6321 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6322 			       chan->omtu, mtu);
6323 			result = L2CAP_RECONF_INVALID_MTU;
6324 		}
6325 
6326 		chan->omtu = mtu;
6327 		chan->remote_mps = mps;
6328 	}
6329 
6330 respond:
6331 	rsp.result = cpu_to_le16(result);
6332 
6333 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6334 		       &rsp);
6335 
6336 	return 0;
6337 }
6338 
6339 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6340 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6341 					 u8 *data)
6342 {
6343 	struct l2cap_chan *chan, *tmp;
6344 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6345 	u16 result;
6346 
6347 	if (cmd_len < sizeof(*rsp))
6348 		return -EPROTO;
6349 
6350 	result = __le16_to_cpu(rsp->result);
6351 
6352 	BT_DBG("result 0x%4.4x", rsp->result);
6353 
6354 	if (!result)
6355 		return 0;
6356 
6357 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6358 		if (chan->ident != cmd->ident)
6359 			continue;
6360 
6361 		l2cap_chan_del(chan, ECONNRESET);
6362 	}
6363 
6364 	return 0;
6365 }
6366 
6367 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6368 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6369 				       u8 *data)
6370 {
6371 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6372 	struct l2cap_chan *chan;
6373 
6374 	if (cmd_len < sizeof(*rej))
6375 		return -EPROTO;
6376 
6377 	mutex_lock(&conn->chan_lock);
6378 
6379 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6380 	if (!chan)
6381 		goto done;
6382 
6383 	chan = l2cap_chan_hold_unless_zero(chan);
6384 	if (!chan)
6385 		goto done;
6386 
6387 	l2cap_chan_lock(chan);
6388 	l2cap_chan_del(chan, ECONNREFUSED);
6389 	l2cap_chan_unlock(chan);
6390 	l2cap_chan_put(chan);
6391 
6392 done:
6393 	mutex_unlock(&conn->chan_lock);
6394 	return 0;
6395 }
6396 
6397 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6398 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6399 				   u8 *data)
6400 {
6401 	int err = 0;
6402 
6403 	switch (cmd->code) {
6404 	case L2CAP_COMMAND_REJ:
6405 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
6406 		break;
6407 
6408 	case L2CAP_CONN_PARAM_UPDATE_REQ:
6409 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6410 		break;
6411 
6412 	case L2CAP_CONN_PARAM_UPDATE_RSP:
6413 		break;
6414 
6415 	case L2CAP_LE_CONN_RSP:
6416 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6417 		break;
6418 
6419 	case L2CAP_LE_CONN_REQ:
6420 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6421 		break;
6422 
6423 	case L2CAP_LE_CREDITS:
6424 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
6425 		break;
6426 
6427 	case L2CAP_ECRED_CONN_REQ:
6428 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6429 		break;
6430 
6431 	case L2CAP_ECRED_CONN_RSP:
6432 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6433 		break;
6434 
6435 	case L2CAP_ECRED_RECONF_REQ:
6436 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6437 		break;
6438 
6439 	case L2CAP_ECRED_RECONF_RSP:
6440 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6441 		break;
6442 
6443 	case L2CAP_DISCONN_REQ:
6444 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6445 		break;
6446 
6447 	case L2CAP_DISCONN_RSP:
6448 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6449 		break;
6450 
6451 	default:
6452 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
6453 		err = -EINVAL;
6454 		break;
6455 	}
6456 
6457 	return err;
6458 }
6459 
6460 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6461 					struct sk_buff *skb)
6462 {
6463 	struct hci_conn *hcon = conn->hcon;
6464 	struct l2cap_cmd_hdr *cmd;
6465 	u16 len;
6466 	int err;
6467 
6468 	if (hcon->type != LE_LINK)
6469 		goto drop;
6470 
6471 	if (skb->len < L2CAP_CMD_HDR_SIZE)
6472 		goto drop;
6473 
6474 	cmd = (void *) skb->data;
6475 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6476 
6477 	len = le16_to_cpu(cmd->len);
6478 
6479 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
6480 
6481 	if (len != skb->len || !cmd->ident) {
6482 		BT_DBG("corrupted command");
6483 		goto drop;
6484 	}
6485 
6486 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6487 	if (err) {
6488 		struct l2cap_cmd_rej_unk rej;
6489 
6490 		BT_ERR("Wrong link type (%d)", err);
6491 
6492 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6493 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6494 			       sizeof(rej), &rej);
6495 	}
6496 
6497 drop:
6498 	kfree_skb(skb);
6499 }
6500 
6501 static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
6502 {
6503 	struct l2cap_cmd_rej_unk rej;
6504 
6505 	rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6506 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
6507 }
6508 
6509 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6510 				     struct sk_buff *skb)
6511 {
6512 	struct hci_conn *hcon = conn->hcon;
6513 	struct l2cap_cmd_hdr *cmd;
6514 	int err;
6515 
6516 	l2cap_raw_recv(conn, skb);
6517 
6518 	if (hcon->type != ACL_LINK)
6519 		goto drop;
6520 
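	/* A single BR/EDR signaling C-frame may pack several commands back
	 * to back, each prefixed by a 4-byte header (code, ident, 16-bit
	 * little-endian length).  As a sketch, one frame could carry both
	 * an Information Request and an Echo Request:
	 *
	 *	| code | ident | len | payload | code | ident | len | payload |
	 *
	 * which is why the parser below loops instead of handling a single
	 * command per frame.
	 */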
6521 	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6522 		u16 len;
6523 
6524 		cmd = (void *) skb->data;
6525 		skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6526 
6527 		len = le16_to_cpu(cmd->len);
6528 
6529 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
6530 		       cmd->ident);
6531 
6532 		if (len > skb->len || !cmd->ident) {
6533 			BT_DBG("corrupted command");
6534 			l2cap_sig_send_rej(conn, cmd->ident);
6535 			skb_pull(skb, len > skb->len ? skb->len : len);
6536 			continue;
6537 		}
6538 
6539 		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6540 		if (err) {
6541 			BT_ERR("Wrong link type (%d)", err);
6542 			l2cap_sig_send_rej(conn, cmd->ident);
6543 		}
6544 
6545 		skb_pull(skb, len);
6546 	}
6547 
6548 	if (skb->len > 0) {
6549 		BT_DBG("corrupted command");
6550 		l2cap_sig_send_rej(conn, 0);
6551 	}
6552 
6553 drop:
6554 	kfree_skb(skb);
6555 }
6556 
6557 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
6558 {
6559 	u16 our_fcs, rcv_fcs;
6560 	int hdr_size;
6561 
6562 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6563 		hdr_size = L2CAP_EXT_HDR_SIZE;
6564 	else
6565 		hdr_size = L2CAP_ENH_HDR_SIZE;
6566 
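	/* Descriptive note on the framing assumed here: with FCS enabled
	 * the CRC-16 covers the basic L2CAP header, the (enhanced or
	 * extended) control field and the information payload, with the
	 * 2-byte FCS appended last.  skb->data already points past the
	 * header at this point, hence the rewind by hdr_size when
	 * recomputing the checksum below.
	 */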
6567 	if (chan->fcs == L2CAP_FCS_CRC16) {
6568 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6569 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6570 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6571 
6572 		if (our_fcs != rcv_fcs)
6573 			return -EBADMSG;
6574 	}
6575 	return 0;
6576 }
6577 
6578 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6579 {
6580 	struct l2cap_ctrl control;
6581 
6582 	BT_DBG("chan %p", chan);
6583 
6584 	memset(&control, 0, sizeof(control));
6585 	control.sframe = 1;
6586 	control.final = 1;
6587 	control.reqseq = chan->buffer_seq;
6588 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6589 
6590 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6591 		control.super = L2CAP_SUPER_RNR;
6592 		l2cap_send_sframe(chan, &control);
6593 	}
6594 
6595 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6596 	    chan->unacked_frames > 0)
6597 		__set_retrans_timer(chan);
6598 
6599 	/* Send pending iframes */
6600 	l2cap_ertm_send(chan);
6601 
6602 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6603 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6604 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
6605 		 * send it now.
6606 		 */
6607 		control.super = L2CAP_SUPER_RR;
6608 		l2cap_send_sframe(chan, &control);
6609 	}
6610 }
6611 
6612 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6613 			    struct sk_buff **last_frag)
6614 {
6615 	/* skb->len reflects data in skb as well as all fragments
6616 	 * skb->data_len reflects only data in fragments
6617 	 */
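	/* Small accounting example (illustrative): if the head skb holds
	 * 100 linear bytes and a 50-byte fragment is appended, skb->len
	 * becomes 150 while skb->data_len becomes 50; truesize grows by
	 * the fragment's own truesize so socket memory accounting stays
	 * exact.
	 */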
6618 	if (!skb_has_frag_list(skb))
6619 		skb_shinfo(skb)->frag_list = new_frag;
6620 
6621 	new_frag->next = NULL;
6622 
6623 	(*last_frag)->next = new_frag;
6624 	*last_frag = new_frag;
6625 
6626 	skb->len += new_frag->len;
6627 	skb->data_len += new_frag->len;
6628 	skb->truesize += new_frag->truesize;
6629 }
6630 
6631 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6632 				struct l2cap_ctrl *control)
6633 {
6634 	int err = -EINVAL;
6635 
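	/* Sketch of the expected SAR sequence for a segmented SDU: a START
	 * frame carrying the 2-byte total SDU length, zero or more CONTINUE
	 * frames, then an END frame.  Reassembly finishes when the
	 * accumulated length equals sdu_len; an oversized SDU, a CONTINUE
	 * or END without a START, or an END with the wrong total all leave
	 * err set and the partial SDU is dropped below.
	 */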
6636 	switch (control->sar) {
6637 	case L2CAP_SAR_UNSEGMENTED:
6638 		if (chan->sdu)
6639 			break;
6640 
6641 		err = chan->ops->recv(chan, skb);
6642 		break;
6643 
6644 	case L2CAP_SAR_START:
6645 		if (chan->sdu)
6646 			break;
6647 
6648 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6649 			break;
6650 
6651 		chan->sdu_len = get_unaligned_le16(skb->data);
6652 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6653 
6654 		if (chan->sdu_len > chan->imtu) {
6655 			err = -EMSGSIZE;
6656 			break;
6657 		}
6658 
6659 		if (skb->len >= chan->sdu_len)
6660 			break;
6661 
6662 		chan->sdu = skb;
6663 		chan->sdu_last_frag = skb;
6664 
6665 		skb = NULL;
6666 		err = 0;
6667 		break;
6668 
6669 	case L2CAP_SAR_CONTINUE:
6670 		if (!chan->sdu)
6671 			break;
6672 
6673 		append_skb_frag(chan->sdu, skb,
6674 				&chan->sdu_last_frag);
6675 		skb = NULL;
6676 
6677 		if (chan->sdu->len >= chan->sdu_len)
6678 			break;
6679 
6680 		err = 0;
6681 		break;
6682 
6683 	case L2CAP_SAR_END:
6684 		if (!chan->sdu)
6685 			break;
6686 
6687 		append_skb_frag(chan->sdu, skb,
6688 				&chan->sdu_last_frag);
6689 		skb = NULL;
6690 
6691 		if (chan->sdu->len != chan->sdu_len)
6692 			break;
6693 
6694 		err = chan->ops->recv(chan, chan->sdu);
6695 
6696 		if (!err) {
6697 			/* Reassembly complete */
6698 			chan->sdu = NULL;
6699 			chan->sdu_last_frag = NULL;
6700 			chan->sdu_len = 0;
6701 		}
6702 		break;
6703 	}
6704 
6705 	if (err) {
6706 		kfree_skb(skb);
6707 		kfree_skb(chan->sdu);
6708 		chan->sdu = NULL;
6709 		chan->sdu_last_frag = NULL;
6710 		chan->sdu_len = 0;
6711 	}
6712 
6713 	return err;
6714 }
6715 
6716 static int l2cap_resegment(struct l2cap_chan *chan)
6717 {
6718 	/* Placeholder */
6719 	return 0;
6720 }
6721 
6722 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6723 {
6724 	u8 event;
6725 
6726 	if (chan->mode != L2CAP_MODE_ERTM)
6727 		return;
6728 
6729 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6730 	l2cap_tx(chan, NULL, NULL, event);
6731 }
6732 
6733 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6734 {
6735 	int err = 0;
6736 	/* Pass sequential frames to l2cap_reassemble_sdu()
6737 	 * until a gap is encountered.
6738 	 */
6739 
6740 	BT_DBG("chan %p", chan);
6741 
6742 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6743 		struct sk_buff *skb;
6744 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
6745 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
6746 
6747 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6748 
6749 		if (!skb)
6750 			break;
6751 
6752 		skb_unlink(skb, &chan->srej_q);
6753 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6754 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6755 		if (err)
6756 			break;
6757 	}
6758 
6759 	if (skb_queue_empty(&chan->srej_q)) {
6760 		chan->rx_state = L2CAP_RX_STATE_RECV;
6761 		l2cap_send_ack(chan);
6762 	}
6763 
6764 	return err;
6765 }
6766 
6767 static void l2cap_handle_srej(struct l2cap_chan *chan,
6768 			      struct l2cap_ctrl *control)
6769 {
6770 	struct sk_buff *skb;
6771 
6772 	BT_DBG("chan %p, control %p", chan, control);
6773 
6774 	if (control->reqseq == chan->next_tx_seq) {
6775 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6776 		l2cap_send_disconn_req(chan, ECONNRESET);
6777 		return;
6778 	}
6779 
6780 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6781 
6782 	if (skb == NULL) {
6783 		BT_DBG("Seq %d not available for retransmission",
6784 		       control->reqseq);
6785 		return;
6786 	}
6787 
6788 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6789 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6790 		l2cap_send_disconn_req(chan, ECONNRESET);
6791 		return;
6792 	}
6793 
6794 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6795 
6796 	if (control->poll) {
6797 		l2cap_pass_to_tx(chan, control);
6798 
6799 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
6800 		l2cap_retransmit(chan, control);
6801 		l2cap_ertm_send(chan);
6802 
6803 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6804 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6805 			chan->srej_save_reqseq = control->reqseq;
6806 		}
6807 	} else {
6808 		l2cap_pass_to_tx_fbit(chan, control);
6809 
6810 		if (control->final) {
6811 			if (chan->srej_save_reqseq != control->reqseq ||
6812 			    !test_and_clear_bit(CONN_SREJ_ACT,
6813 						&chan->conn_state))
6814 				l2cap_retransmit(chan, control);
6815 		} else {
6816 			l2cap_retransmit(chan, control);
6817 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6818 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6819 				chan->srej_save_reqseq = control->reqseq;
6820 			}
6821 		}
6822 	}
6823 }
6824 
6825 static void l2cap_handle_rej(struct l2cap_chan *chan,
6826 			     struct l2cap_ctrl *control)
6827 {
6828 	struct sk_buff *skb;
6829 
6830 	BT_DBG("chan %p, control %p", chan, control);
6831 
6832 	if (control->reqseq == chan->next_tx_seq) {
6833 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6834 		l2cap_send_disconn_req(chan, ECONNRESET);
6835 		return;
6836 	}
6837 
6838 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6839 
6840 	if (chan->max_tx && skb &&
6841 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6842 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6843 		l2cap_send_disconn_req(chan, ECONNRESET);
6844 		return;
6845 	}
6846 
6847 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6848 
6849 	l2cap_pass_to_tx(chan, control);
6850 
6851 	if (control->final) {
6852 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6853 			l2cap_retransmit_all(chan, control);
6854 	} else {
6855 		l2cap_retransmit_all(chan, control);
6856 		l2cap_ertm_send(chan);
6857 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6858 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6859 	}
6860 }
6861 
6862 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6863 {
6864 	BT_DBG("chan %p, txseq %d", chan, txseq);
6865 
6866 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6867 	       chan->expected_tx_seq);
6868 
6869 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6870 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6871 		    chan->tx_win) {
6872 			/* See notes below regarding "double poll" and
6873 			 * invalid packets.
6874 			 */
6875 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6876 				BT_DBG("Invalid/Ignore - after SREJ");
6877 				return L2CAP_TXSEQ_INVALID_IGNORE;
6878 			} else {
6879 				BT_DBG("Invalid - in window after SREJ sent");
6880 				return L2CAP_TXSEQ_INVALID;
6881 			}
6882 		}
6883 
6884 		if (chan->srej_list.head == txseq) {
6885 			BT_DBG("Expected SREJ");
6886 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6887 		}
6888 
6889 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6890 			BT_DBG("Duplicate SREJ - txseq already stored");
6891 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6892 		}
6893 
6894 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6895 			BT_DBG("Unexpected SREJ - not requested");
6896 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6897 		}
6898 	}
6899 
6900 	if (chan->expected_tx_seq == txseq) {
6901 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6902 		    chan->tx_win) {
6903 			BT_DBG("Invalid - txseq outside tx window");
6904 			return L2CAP_TXSEQ_INVALID;
6905 		} else {
6906 			BT_DBG("Expected");
6907 			return L2CAP_TXSEQ_EXPECTED;
6908 		}
6909 	}
6910 
6911 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6912 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6913 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6914 		return L2CAP_TXSEQ_DUPLICATE;
6915 	}
6916 
6917 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6918 		/* A source of invalid packets is a "double poll" condition,
6919 		 * where delays cause us to send multiple poll packets.  If
6920 		 * the remote stack receives and processes both polls,
6921 		 * sequence numbers can wrap around in such a way that a
6922 		 * resent frame has a sequence number that looks like new data
6923 		 * with a sequence gap.  This would trigger an erroneous SREJ
6924 		 * request.
6925 		 *
6926 		 * Fortunately, this is impossible with a tx window that's
6927 		 * less than half of the maximum sequence number, which allows
6928 		 * invalid frames to be safely ignored.
6929 		 *
6930 		 * With tx window sizes greater than half of the tx window
6931 		 * maximum, the frame is invalid and cannot be ignored.  This
6932 		 * causes a disconnect.
6933 		 */
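		/* Concrete numbers for the rule above (illustrative): with
		 * 6-bit sequence numbers tx_win_max is 63, so the safe bound
		 * is 32.  A channel using tx_win = 10 can silently ignore a
		 * txseq that lands 10 or more frames past last_acked_seq,
		 * while a channel using tx_win = 40 cannot tell a stale poll
		 * artifact from real loss and has to disconnect.
		 */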
6934 
6935 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6936 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6937 			return L2CAP_TXSEQ_INVALID_IGNORE;
6938 		} else {
6939 			BT_DBG("Invalid - txseq outside tx window");
6940 			return L2CAP_TXSEQ_INVALID;
6941 		}
6942 	} else {
6943 		BT_DBG("Unexpected - txseq indicates missing frames");
6944 		return L2CAP_TXSEQ_UNEXPECTED;
6945 	}
6946 }
6947 
6948 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6949 			       struct l2cap_ctrl *control,
6950 			       struct sk_buff *skb, u8 event)
6951 {
6952 	struct l2cap_ctrl local_control;
6953 	int err = 0;
6954 	bool skb_in_use = false;
6955 
6956 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6957 	       event);
6958 
6959 	switch (event) {
6960 	case L2CAP_EV_RECV_IFRAME:
6961 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6962 		case L2CAP_TXSEQ_EXPECTED:
6963 			l2cap_pass_to_tx(chan, control);
6964 
6965 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6966 				BT_DBG("Busy, discarding expected seq %d",
6967 				       control->txseq);
6968 				break;
6969 			}
6970 
6971 			chan->expected_tx_seq = __next_seq(chan,
6972 							   control->txseq);
6973 
6974 			chan->buffer_seq = chan->expected_tx_seq;
6975 			skb_in_use = true;
6976 
6977 			/* l2cap_reassemble_sdu may free skb, hence invalidate
6978 			 * control, so make a copy in advance to use it after
6979 			 * l2cap_reassemble_sdu returns and to avoid the race
6980 			 * condition, for example:
6981 			 *
6982 			 * The current thread calls:
6983 			 *   l2cap_reassemble_sdu
6984 			 *     chan->ops->recv == l2cap_sock_recv_cb
6985 			 *       __sock_queue_rcv_skb
6986 			 * Another thread calls:
6987 			 *   bt_sock_recvmsg
6988 			 *     skb_recv_datagram
6989 			 *     skb_free_datagram
6990 			 * Then the current thread tries to access control, but
6991 			 * it was freed by skb_free_datagram.
6992 			 */
6993 			local_control = *control;
6994 			err = l2cap_reassemble_sdu(chan, skb, control);
6995 			if (err)
6996 				break;
6997 
6998 			if (local_control.final) {
6999 				if (!test_and_clear_bit(CONN_REJ_ACT,
7000 							&chan->conn_state)) {
7001 					local_control.final = 0;
7002 					l2cap_retransmit_all(chan, &local_control);
7003 					l2cap_ertm_send(chan);
7004 				}
7005 			}
7006 
7007 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
7008 				l2cap_send_ack(chan);
7009 			break;
7010 		case L2CAP_TXSEQ_UNEXPECTED:
7011 			l2cap_pass_to_tx(chan, control);
7012 
7013 			/* Can't issue SREJ frames in the local busy state.
7014 			 * Drop this frame, it will be seen as missing
7015 			 * when local busy is exited.
7016 			 */
7017 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
7018 				BT_DBG("Busy, discarding unexpected seq %d",
7019 				       control->txseq);
7020 				break;
7021 			}
7022 
7023 			/* There was a gap in the sequence, so an SREJ
7024 			 * must be sent for each missing frame.  The
7025 			 * current frame is stored for later use.
7026 			 */
7027 			skb_queue_tail(&chan->srej_q, skb);
7028 			skb_in_use = true;
7029 			BT_DBG("Queued %p (queue len %d)", skb,
7030 			       skb_queue_len(&chan->srej_q));
7031 
7032 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
7033 			l2cap_seq_list_clear(&chan->srej_list);
7034 			l2cap_send_srej(chan, control->txseq);
7035 
7036 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
7037 			break;
7038 		case L2CAP_TXSEQ_DUPLICATE:
7039 			l2cap_pass_to_tx(chan, control);
7040 			break;
7041 		case L2CAP_TXSEQ_INVALID_IGNORE:
7042 			break;
7043 		case L2CAP_TXSEQ_INVALID:
7044 		default:
7045 			l2cap_send_disconn_req(chan, ECONNRESET);
7046 			break;
7047 		}
7048 		break;
7049 	case L2CAP_EV_RECV_RR:
7050 		l2cap_pass_to_tx(chan, control);
7051 		if (control->final) {
7052 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7053 
7054 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
7055 			    !__chan_is_moving(chan)) {
7056 				control->final = 0;
7057 				l2cap_retransmit_all(chan, control);
7058 			}
7059 
7060 			l2cap_ertm_send(chan);
7061 		} else if (control->poll) {
7062 			l2cap_send_i_or_rr_or_rnr(chan);
7063 		} else {
7064 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7065 					       &chan->conn_state) &&
7066 			    chan->unacked_frames)
7067 				__set_retrans_timer(chan);
7068 
7069 			l2cap_ertm_send(chan);
7070 		}
7071 		break;
7072 	case L2CAP_EV_RECV_RNR:
7073 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7074 		l2cap_pass_to_tx(chan, control);
7075 		if (control && control->poll) {
7076 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7077 			l2cap_send_rr_or_rnr(chan, 0);
7078 		}
7079 		__clear_retrans_timer(chan);
7080 		l2cap_seq_list_clear(&chan->retrans_list);
7081 		break;
7082 	case L2CAP_EV_RECV_REJ:
7083 		l2cap_handle_rej(chan, control);
7084 		break;
7085 	case L2CAP_EV_RECV_SREJ:
7086 		l2cap_handle_srej(chan, control);
7087 		break;
7088 	default:
7089 		break;
7090 	}
7091 
7092 	if (skb && !skb_in_use) {
7093 		BT_DBG("Freeing %p", skb);
7094 		kfree_skb(skb);
7095 	}
7096 
7097 	return err;
7098 }
7099 
7100 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
7101 				    struct l2cap_ctrl *control,
7102 				    struct sk_buff *skb, u8 event)
7103 {
7104 	int err = 0;
7105 	u16 txseq = control->txseq;
7106 	bool skb_in_use = false;
7107 
7108 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7109 	       event);
7110 
7111 	switch (event) {
7112 	case L2CAP_EV_RECV_IFRAME:
7113 		switch (l2cap_classify_txseq(chan, txseq)) {
7114 		case L2CAP_TXSEQ_EXPECTED:
7115 			/* Keep frame for reassembly later */
7116 			l2cap_pass_to_tx(chan, control);
7117 			skb_queue_tail(&chan->srej_q, skb);
7118 			skb_in_use = true;
7119 			BT_DBG("Queued %p (queue len %d)", skb,
7120 			       skb_queue_len(&chan->srej_q));
7121 
7122 			chan->expected_tx_seq = __next_seq(chan, txseq);
7123 			break;
7124 		case L2CAP_TXSEQ_EXPECTED_SREJ:
7125 			l2cap_seq_list_pop(&chan->srej_list);
7126 
7127 			l2cap_pass_to_tx(chan, control);
7128 			skb_queue_tail(&chan->srej_q, skb);
7129 			skb_in_use = true;
7130 			BT_DBG("Queued %p (queue len %d)", skb,
7131 			       skb_queue_len(&chan->srej_q));
7132 
7133 			err = l2cap_rx_queued_iframes(chan);
7134 			if (err)
7135 				break;
7136 
7137 			break;
7138 		case L2CAP_TXSEQ_UNEXPECTED:
7139 			/* Got a frame that can't be reassembled yet.
7140 			 * Save it for later, and send SREJs to cover
7141 			 * the missing frames.
7142 			 */
7143 			skb_queue_tail(&chan->srej_q, skb);
7144 			skb_in_use = true;
7145 			BT_DBG("Queued %p (queue len %d)", skb,
7146 			       skb_queue_len(&chan->srej_q));
7147 
7148 			l2cap_pass_to_tx(chan, control);
7149 			l2cap_send_srej(chan, control->txseq);
7150 			break;
7151 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7152 			/* This frame was requested with an SREJ, but
7153 			 * some expected retransmitted frames are
7154 			 * missing.  Request retransmission of missing
7155 			 * SREJ'd frames.
7156 			 */
7157 			skb_queue_tail(&chan->srej_q, skb);
7158 			skb_in_use = true;
7159 			BT_DBG("Queued %p (queue len %d)", skb,
7160 			       skb_queue_len(&chan->srej_q));
7161 
7162 			l2cap_pass_to_tx(chan, control);
7163 			l2cap_send_srej_list(chan, control->txseq);
7164 			break;
7165 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
7166 			/* We've already queued this frame.  Drop this copy. */
7167 			l2cap_pass_to_tx(chan, control);
7168 			break;
7169 		case L2CAP_TXSEQ_DUPLICATE:
7170 			/* Expecting a later sequence number, so this frame
7171 			 * was already received.  Ignore it completely.
7172 			 */
7173 			break;
7174 		case L2CAP_TXSEQ_INVALID_IGNORE:
7175 			break;
7176 		case L2CAP_TXSEQ_INVALID:
7177 		default:
7178 			l2cap_send_disconn_req(chan, ECONNRESET);
7179 			break;
7180 		}
7181 		break;
7182 	case L2CAP_EV_RECV_RR:
7183 		l2cap_pass_to_tx(chan, control);
7184 		if (control->final) {
7185 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7186 
7187 			if (!test_and_clear_bit(CONN_REJ_ACT,
7188 						&chan->conn_state)) {
7189 				control->final = 0;
7190 				l2cap_retransmit_all(chan, control);
7191 			}
7192 
7193 			l2cap_ertm_send(chan);
7194 		} else if (control->poll) {
7195 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7196 					       &chan->conn_state) &&
7197 			    chan->unacked_frames) {
7198 				__set_retrans_timer(chan);
7199 			}
7200 
7201 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7202 			l2cap_send_srej_tail(chan);
7203 		} else {
7204 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7205 					       &chan->conn_state) &&
7206 			    chan->unacked_frames)
7207 				__set_retrans_timer(chan);
7208 
7209 			l2cap_send_ack(chan);
7210 		}
7211 		break;
7212 	case L2CAP_EV_RECV_RNR:
7213 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7214 		l2cap_pass_to_tx(chan, control);
7215 		if (control->poll) {
7216 			l2cap_send_srej_tail(chan);
7217 		} else {
7218 			struct l2cap_ctrl rr_control;
7219 			memset(&rr_control, 0, sizeof(rr_control));
7220 			rr_control.sframe = 1;
7221 			rr_control.super = L2CAP_SUPER_RR;
7222 			rr_control.reqseq = chan->buffer_seq;
7223 			l2cap_send_sframe(chan, &rr_control);
7224 		}
7225 
7226 		break;
7227 	case L2CAP_EV_RECV_REJ:
7228 		l2cap_handle_rej(chan, control);
7229 		break;
7230 	case L2CAP_EV_RECV_SREJ:
7231 		l2cap_handle_srej(chan, control);
7232 		break;
7233 	}
7234 
7235 	if (skb && !skb_in_use) {
7236 		BT_DBG("Freeing %p", skb);
7237 		kfree_skb(skb);
7238 	}
7239 
7240 	return err;
7241 }
7242 
7243 static int l2cap_finish_move(struct l2cap_chan *chan)
7244 {
7245 	BT_DBG("chan %p", chan);
7246 
7247 	chan->rx_state = L2CAP_RX_STATE_RECV;
7248 
7249 	if (chan->hs_hcon)
7250 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7251 	else
7252 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7253 
7254 	return l2cap_resegment(chan);
7255 }
7256 
7257 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7258 				 struct l2cap_ctrl *control,
7259 				 struct sk_buff *skb, u8 event)
7260 {
7261 	int err;
7262 
7263 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7264 	       event);
7265 
7266 	if (!control->poll)
7267 		return -EPROTO;
7268 
7269 	l2cap_process_reqseq(chan, control->reqseq);
7270 
7271 	if (!skb_queue_empty(&chan->tx_q))
7272 		chan->tx_send_head = skb_peek(&chan->tx_q);
7273 	else
7274 		chan->tx_send_head = NULL;
7275 
7276 	/* Rewind next_tx_seq to the point expected
7277 	 * by the receiver.
7278 	 */
7279 	chan->next_tx_seq = control->reqseq;
7280 	chan->unacked_frames = 0;
7281 
7282 	err = l2cap_finish_move(chan);
7283 	if (err)
7284 		return err;
7285 
7286 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
7287 	l2cap_send_i_or_rr_or_rnr(chan);
7288 
7289 	if (event == L2CAP_EV_RECV_IFRAME)
7290 		return -EPROTO;
7291 
7292 	return l2cap_rx_state_recv(chan, control, NULL, event);
7293 }
7294 
7295 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7296 				 struct l2cap_ctrl *control,
7297 				 struct sk_buff *skb, u8 event)
7298 {
7299 	int err;
7300 
7301 	if (!control->final)
7302 		return -EPROTO;
7303 
7304 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7305 
7306 	chan->rx_state = L2CAP_RX_STATE_RECV;
7307 	l2cap_process_reqseq(chan, control->reqseq);
7308 
7309 	if (!skb_queue_empty(&chan->tx_q))
7310 		chan->tx_send_head = skb_peek(&chan->tx_q);
7311 	else
7312 		chan->tx_send_head = NULL;
7313 
7314 	/* Rewind next_tx_seq to the point expected
7315 	 * by the receiver.
7316 	 */
7317 	chan->next_tx_seq = control->reqseq;
7318 	chan->unacked_frames = 0;
7319 
7320 	if (chan->hs_hcon)
7321 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7322 	else
7323 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7324 
7325 	err = l2cap_resegment(chan);
7326 
7327 	if (!err)
7328 		err = l2cap_rx_state_recv(chan, control, skb, event);
7329 
7330 	return err;
7331 }
7332 
7333 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7334 {
7335 	/* Make sure reqseq is for a packet that has been sent but not acked */
7336 	u16 unacked;
7337 
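	/* Example (illustrative): with expected_ack_seq = 10 and
	 * next_tx_seq = 14 there are 4 unacked frames, so the only
	 * acceptable reqseq values are 10..14 (modulo the sequence space);
	 * anything else acknowledges a frame we never sent.
	 */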
7338 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7339 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7340 }
7341 
7342 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7343 		    struct sk_buff *skb, u8 event)
7344 {
7345 	int err = 0;
7346 
7347 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7348 	       control, skb, event, chan->rx_state);
7349 
7350 	if (__valid_reqseq(chan, control->reqseq)) {
7351 		switch (chan->rx_state) {
7352 		case L2CAP_RX_STATE_RECV:
7353 			err = l2cap_rx_state_recv(chan, control, skb, event);
7354 			break;
7355 		case L2CAP_RX_STATE_SREJ_SENT:
7356 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7357 						       event);
7358 			break;
7359 		case L2CAP_RX_STATE_WAIT_P:
7360 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7361 			break;
7362 		case L2CAP_RX_STATE_WAIT_F:
7363 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7364 			break;
7365 		default:
7366 			/* shut it down */
7367 			break;
7368 		}
7369 	} else {
7370 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7371 		       control->reqseq, chan->next_tx_seq,
7372 		       chan->expected_ack_seq);
7373 		l2cap_send_disconn_req(chan, ECONNRESET);
7374 	}
7375 
7376 	return err;
7377 }
7378 
7379 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7380 			   struct sk_buff *skb)
7381 {
7382 	/* l2cap_reassemble_sdu may free the skb and thereby invalidate
7383 	 * control, so save the txseq field up front and use the copy after
7384 	 * l2cap_reassemble_sdu returns, avoiding the following race:
7385 	 *
7386 	 * The current thread calls:
7387 	 *   l2cap_reassemble_sdu
7388 	 *     chan->ops->recv == l2cap_sock_recv_cb
7389 	 *       __sock_queue_rcv_skb
7390 	 * Another thread calls:
7391 	 *   bt_sock_recvmsg
7392 	 *     skb_recv_datagram
7393 	 *     skb_free_datagram
7394 	 * Then the current thread tries to access control, but it was freed by
7395 	 * skb_free_datagram.
7396 	 */
7397 	u16 txseq = control->txseq;
7398 
7399 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7400 	       chan->rx_state);
7401 
7402 	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
7403 		l2cap_pass_to_tx(chan, control);
7404 
7405 		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
7406 		       __next_seq(chan, chan->buffer_seq));
7407 
7408 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7409 
7410 		l2cap_reassemble_sdu(chan, skb, control);
7411 	} else {
7412 		if (chan->sdu) {
7413 			kfree_skb(chan->sdu);
7414 			chan->sdu = NULL;
7415 		}
7416 		chan->sdu_last_frag = NULL;
7417 		chan->sdu_len = 0;
7418 
7419 		if (skb) {
7420 			BT_DBG("Freeing %p", skb);
7421 			kfree_skb(skb);
7422 		}
7423 	}
7424 
7425 	chan->last_acked_seq = txseq;
7426 	chan->expected_tx_seq = __next_seq(chan, txseq);
7427 
7428 	return 0;
7429 }
7430 
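/* Receive path for ERTM and streaming mode channels: unpack and validate
 * the control field, FCS and payload length, then feed I-frames and
 * S-frames into the appropriate receive handler.  Malformed frames are
 * dropped and, where required, the channel is disconnected.
 */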
7431 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7432 {
7433 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7434 	u16 len;
7435 	u8 event;
7436 
7437 	__unpack_control(chan, skb);
7438 
7439 	len = skb->len;
7440 
7441 	/*
7442 	 * We can simply drop a corrupted I-frame here: the receive state
7443 	 * machine will detect the missing sequence number, start the
7444 	 * recovery procedure and ask for a retransmission.
7445 	 */
7446 	if (l2cap_check_fcs(chan, skb))
7447 		goto drop;
7448 
7449 	if (!control->sframe && control->sar == L2CAP_SAR_START)
7450 		len -= L2CAP_SDULEN_SIZE;
7451 
7452 	if (chan->fcs == L2CAP_FCS_CRC16)
7453 		len -= L2CAP_FCS_SIZE;
7454 
7455 	if (len > chan->mps) {
7456 		l2cap_send_disconn_req(chan, ECONNRESET);
7457 		goto drop;
7458 	}
7459 
7460 	if (chan->ops->filter) {
7461 		if (chan->ops->filter(chan, skb))
7462 			goto drop;
7463 	}
7464 
7465 	if (!control->sframe) {
7466 		int err;
7467 
7468 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7469 		       control->sar, control->reqseq, control->final,
7470 		       control->txseq);
7471 
7472 		/* Validate F-bit - F=0 always valid, F=1 only
7473 		 * valid in TX WAIT_F
7474 		 */
7475 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7476 			goto drop;
7477 
7478 		if (chan->mode != L2CAP_MODE_STREAMING) {
7479 			event = L2CAP_EV_RECV_IFRAME;
7480 			err = l2cap_rx(chan, control, skb, event);
7481 		} else {
7482 			err = l2cap_stream_rx(chan, control, skb);
7483 		}
7484 
7485 		if (err)
7486 			l2cap_send_disconn_req(chan, ECONNRESET);
7487 	} else {
7488 		const u8 rx_func_to_event[4] = {
7489 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7490 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7491 		};
7492 
7493 		/* Only I-frames are expected in streaming mode */
7494 		if (chan->mode == L2CAP_MODE_STREAMING)
7495 			goto drop;
7496 
7497 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7498 		       control->reqseq, control->final, control->poll,
7499 		       control->super);
7500 
7501 		if (len != 0) {
7502 			BT_ERR("Trailing bytes: %d in sframe", len);
7503 			l2cap_send_disconn_req(chan, ECONNRESET);
7504 			goto drop;
7505 		}
7506 
7507 		/* Validate F and P bits */
7508 		if (control->final && (control->poll ||
7509 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7510 			goto drop;
7511 
7512 		event = rx_func_to_event[control->super];
7513 		if (l2cap_rx(chan, control, skb, event))
7514 			l2cap_send_disconn_req(chan, ECONNRESET);
7515 	}
7516 
7517 	return 0;
7518 
7519 drop:
7520 	kfree_skb(skb);
7521 	return 0;
7522 }
7523 
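/* Return receive credits to the remote side of a credit based channel.
 * Credits are topped up to (imtu / mps) + 1, i.e. enough for the peer to
 * send one full SDU, and only when the local count has dropped below
 * that threshold.
 */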
7524 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7525 {
7526 	struct l2cap_conn *conn = chan->conn;
7527 	struct l2cap_le_credits pkt;
7528 	u16 return_credits;
7529 
7530 	return_credits = (chan->imtu / chan->mps) + 1;
7531 
7532 	if (chan->rx_credits >= return_credits)
7533 		return;
7534 
7535 	return_credits -= chan->rx_credits;
7536 
7537 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7538 
7539 	chan->rx_credits += return_credits;
7540 
7541 	pkt.cid     = cpu_to_le16(chan->scid);
7542 	pkt.credits = cpu_to_le16(return_credits);
7543 
7544 	chan->ident = l2cap_get_ident(conn);
7545 
7546 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7547 }
7548 
7549 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7550 {
7551 	int err;
7552 
7553 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7554 
7555 	/* Wait for recv to confirm reception before updating the credits */
7556 	err = chan->ops->recv(chan, skb);
7557 
7558 	/* Update credits whenever an SDU is received */
7559 	l2cap_chan_le_send_credits(chan);
7560 
7561 	return err;
7562 }
7563 
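/* Receive a single PDU on an LE or enhanced credit based flow control
 * channel: consume one credit, reassemble the SDU (the first PDU of an
 * SDU carries a 2-byte SDU length prefix) and hand complete SDUs to the
 * channel ops.  Oversized or inconsistent data discards the partial SDU.
 */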
7564 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7565 {
7566 	int err;
7567 
7568 	if (!chan->rx_credits) {
7569 		BT_ERR("No credits to receive LE L2CAP data");
7570 		l2cap_send_disconn_req(chan, ECONNRESET);
7571 		return -ENOBUFS;
7572 	}
7573 
7574 	if (chan->imtu < skb->len) {
7575 		BT_ERR("Too big LE L2CAP PDU");
7576 		return -ENOBUFS;
7577 	}
7578 
7579 	chan->rx_credits--;
7580 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7581 
7582 	/* Replenish credits if the remote has run out; this should only
7583 	 * happen if the remote is not using the entire MPS.
7584 	 */
7585 	if (!chan->rx_credits)
7586 		l2cap_chan_le_send_credits(chan);
7587 
7588 	err = 0;
7589 
7590 	if (!chan->sdu) {
7591 		u16 sdu_len;
7592 
7593 		sdu_len = get_unaligned_le16(skb->data);
7594 		skb_pull(skb, L2CAP_SDULEN_SIZE);
7595 
7596 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7597 		       sdu_len, skb->len, chan->imtu);
7598 
7599 		if (sdu_len > chan->imtu) {
7600 			BT_ERR("Too big LE L2CAP SDU length received");
7601 			err = -EMSGSIZE;
7602 			goto failed;
7603 		}
7604 
7605 		if (skb->len > sdu_len) {
7606 			BT_ERR("Too much LE L2CAP data received");
7607 			err = -EINVAL;
7608 			goto failed;
7609 		}
7610 
7611 		if (skb->len == sdu_len)
7612 			return l2cap_ecred_recv(chan, skb);
7613 
7614 		chan->sdu = skb;
7615 		chan->sdu_len = sdu_len;
7616 		chan->sdu_last_frag = skb;
7617 
7618 		/* Detect if remote is not able to use the selected MPS */
7619 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7620 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7621 
7622 			/* Adjust the number of credits */
7623 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7624 			chan->mps = mps_len;
7625 			l2cap_chan_le_send_credits(chan);
7626 		}
7627 
7628 		return 0;
7629 	}
7630 
7631 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7632 	       chan->sdu->len, skb->len, chan->sdu_len);
7633 
7634 	if (chan->sdu->len + skb->len > chan->sdu_len) {
7635 		BT_ERR("Too much LE L2CAP data received");
7636 		err = -EINVAL;
7637 		goto failed;
7638 	}
7639 
7640 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7641 	skb = NULL;
7642 
7643 	if (chan->sdu->len == chan->sdu_len) {
7644 		err = l2cap_ecred_recv(chan, chan->sdu);
7645 		if (!err) {
7646 			chan->sdu = NULL;
7647 			chan->sdu_last_frag = NULL;
7648 			chan->sdu_len = 0;
7649 		}
7650 	}
7651 
7652 failed:
7653 	if (err) {
7654 		kfree_skb(skb);
7655 		kfree_skb(chan->sdu);
7656 		chan->sdu = NULL;
7657 		chan->sdu_last_frag = NULL;
7658 		chan->sdu_len = 0;
7659 	}
7660 
7661 	/* We can't return an error here since we took care of the skb
7662 	 * freeing internally. An error return would cause the caller to
7663 	 * do a double-free of the skb.
7664 	 */
7665 	return 0;
7666 }
7667 
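/* Dispatch an incoming PDU addressed to a connection-oriented or fixed
 * CID to the owning channel and its mode specific receive handler.  Data
 * for an unknown CID is dropped, except on the A2MP CID where a channel
 * may be created on demand.
 */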
7668 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7669 			       struct sk_buff *skb)
7670 {
7671 	struct l2cap_chan *chan;
7672 
7673 	chan = l2cap_get_chan_by_scid(conn, cid);
7674 	if (!chan) {
7675 		if (cid == L2CAP_CID_A2MP) {
7676 			chan = a2mp_channel_create(conn, skb);
7677 			if (!chan) {
7678 				kfree_skb(skb);
7679 				return;
7680 			}
7681 
7682 			l2cap_chan_hold(chan);
7683 			l2cap_chan_lock(chan);
7684 		} else {
7685 			BT_DBG("unknown cid 0x%4.4x", cid);
7686 			/* Drop packet and return */
7687 			kfree_skb(skb);
7688 			return;
7689 		}
7690 	}
7691 
7692 	BT_DBG("chan %p, len %d", chan, skb->len);
7693 
7694 	/* If we receive data on a fixed channel before the info req/rsp
7695 	 * procedure is done simply assume that the channel is supported
7696 	 * and mark it as ready.
7697 	 */
7698 	if (chan->chan_type == L2CAP_CHAN_FIXED)
7699 		l2cap_chan_ready(chan);
7700 
7701 	if (chan->state != BT_CONNECTED)
7702 		goto drop;
7703 
7704 	switch (chan->mode) {
7705 	case L2CAP_MODE_LE_FLOWCTL:
7706 	case L2CAP_MODE_EXT_FLOWCTL:
7707 		if (l2cap_ecred_data_rcv(chan, skb) < 0)
7708 			goto drop;
7709 
7710 		goto done;
7711 
7712 	case L2CAP_MODE_BASIC:
7713 		/* If the socket recv buffer overflows we drop data here,
7714 		 * which is *bad* because L2CAP has to be reliable.
7715 		 * But we don't have any other choice: basic mode L2CAP
7716 		 * doesn't provide a flow control mechanism. */
7717 
7718 		if (chan->imtu < skb->len) {
7719 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
7720 			goto drop;
7721 		}
7722 
7723 		if (!chan->ops->recv(chan, skb))
7724 			goto done;
7725 		break;
7726 
7727 	case L2CAP_MODE_ERTM:
7728 	case L2CAP_MODE_STREAMING:
7729 		l2cap_data_rcv(chan, skb);
7730 		goto done;
7731 
7732 	default:
7733 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7734 		break;
7735 	}
7736 
7737 drop:
7738 	kfree_skb(skb);
7739 
7740 done:
7741 	l2cap_chan_unlock(chan);
7742 	l2cap_chan_put(chan);
7743 }
7744 
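/* Deliver a connectionless (G-frame) PDU to a channel bound to the given
 * PSM.  Only ACL links are accepted and the payload must fit within the
 * channel's incoming MTU.
 */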
7745 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7746 				  struct sk_buff *skb)
7747 {
7748 	struct hci_conn *hcon = conn->hcon;
7749 	struct l2cap_chan *chan;
7750 
7751 	if (hcon->type != ACL_LINK)
7752 		goto free_skb;
7753 
7754 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7755 					ACL_LINK);
7756 	if (!chan)
7757 		goto free_skb;
7758 
7759 	BT_DBG("chan %p, len %d", chan, skb->len);
7760 
7761 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7762 		goto drop;
7763 
7764 	if (chan->imtu < skb->len)
7765 		goto drop;
7766 
7767 	/* Store remote BD_ADDR and PSM for msg_name */
7768 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7769 	bt_cb(skb)->l2cap.psm = psm;
7770 
7771 	if (!chan->ops->recv(chan, skb)) {
7772 		l2cap_chan_put(chan);
7773 		return;
7774 	}
7775 
7776 drop:
7777 	l2cap_chan_put(chan);
7778 free_skb:
7779 	kfree_skb(skb);
7780 }
7781 
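/* Entry point for a complete, reassembled L2CAP frame: parse the basic
 * header and route the payload by CID to the signalling, connectionless
 * or data channel handlers.  Frames arriving before the link is fully
 * connected are queued on pending_rx.
 */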
7782 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7783 {
7784 	struct l2cap_hdr *lh = (void *) skb->data;
7785 	struct hci_conn *hcon = conn->hcon;
7786 	u16 cid, len;
7787 	__le16 psm;
7788 
7789 	if (hcon->state != BT_CONNECTED) {
7790 		BT_DBG("queueing pending rx skb");
7791 		skb_queue_tail(&conn->pending_rx, skb);
7792 		return;
7793 	}
7794 
7795 	skb_pull(skb, L2CAP_HDR_SIZE);
7796 	cid = __le16_to_cpu(lh->cid);
7797 	len = __le16_to_cpu(lh->len);
7798 
7799 	if (len != skb->len) {
7800 		kfree_skb(skb);
7801 		return;
7802 	}
7803 
7804 	/* Since we can't actively block incoming LE connections we must
7805 	 * at least ensure that we ignore incoming data from them.
7806 	 */
7807 	if (hcon->type == LE_LINK &&
7808 	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
7809 				   bdaddr_dst_type(hcon))) {
7810 		kfree_skb(skb);
7811 		return;
7812 	}
7813 
7814 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7815 
7816 	switch (cid) {
7817 	case L2CAP_CID_SIGNALING:
7818 		l2cap_sig_channel(conn, skb);
7819 		break;
7820 
7821 	case L2CAP_CID_CONN_LESS:
7822 		psm = get_unaligned((__le16 *) skb->data);
7823 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7824 		l2cap_conless_channel(conn, psm, skb);
7825 		break;
7826 
7827 	case L2CAP_CID_LE_SIGNALING:
7828 		l2cap_le_sig_channel(conn, skb);
7829 		break;
7830 
7831 	default:
7832 		l2cap_data_channel(conn, cid, skb);
7833 		break;
7834 	}
7835 }
7836 
7837 static void process_pending_rx(struct work_struct *work)
7838 {
7839 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7840 					       pending_rx_work);
7841 	struct sk_buff *skb;
7842 
7843 	BT_DBG("");
7844 
7845 	while ((skb = skb_dequeue(&conn->pending_rx)))
7846 		l2cap_recv_frame(conn, skb);
7847 }
7848 
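/* Get or create the L2CAP connection object for an HCI connection,
 * setting up the HCI channel, MTU, fixed channel mask, locks and the
 * deferred work used for info requests and pending rx processing.
 */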
7849 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7850 {
7851 	struct l2cap_conn *conn = hcon->l2cap_data;
7852 	struct hci_chan *hchan;
7853 
7854 	if (conn)
7855 		return conn;
7856 
7857 	hchan = hci_chan_create(hcon);
7858 	if (!hchan)
7859 		return NULL;
7860 
7861 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7862 	if (!conn) {
7863 		hci_chan_del(hchan);
7864 		return NULL;
7865 	}
7866 
7867 	kref_init(&conn->ref);
7868 	hcon->l2cap_data = conn;
7869 	conn->hcon = hci_conn_get(hcon);
7870 	conn->hchan = hchan;
7871 
7872 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7873 
7874 	switch (hcon->type) {
7875 	case LE_LINK:
7876 		if (hcon->hdev->le_mtu) {
7877 			conn->mtu = hcon->hdev->le_mtu;
7878 			break;
7879 		}
7880 		fallthrough;
7881 	default:
7882 		conn->mtu = hcon->hdev->acl_mtu;
7883 		break;
7884 	}
7885 
7886 	conn->feat_mask = 0;
7887 
7888 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7889 
7890 	if (hcon->type == ACL_LINK &&
7891 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7892 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7893 
7894 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7895 	    (bredr_sc_enabled(hcon->hdev) ||
7896 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7897 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7898 
7899 	mutex_init(&conn->ident_lock);
7900 	mutex_init(&conn->chan_lock);
7901 
7902 	INIT_LIST_HEAD(&conn->chan_l);
7903 	INIT_LIST_HEAD(&conn->users);
7904 
7905 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7906 
7907 	skb_queue_head_init(&conn->pending_rx);
7908 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7909 	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);
7910 
7911 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7912 
7913 	return conn;
7914 }
7915 
7916 static bool is_valid_psm(u16 psm, u8 dst_type)
7917 {
7918 	if (!psm)
7919 		return false;
7920 
7921 	if (bdaddr_type_is_le(dst_type))
7922 		return (psm <= 0x00ff);
7923 
7924 	/* PSM must be odd and lsb of upper byte must be 0 */
7925 	return ((psm & 0x0101) == 0x0001);
7926 }
7927 
7928 struct l2cap_chan_data {
7929 	struct l2cap_chan *chan;
7930 	struct pid *pid;
7931 	int count;
7932 };
7933 
7934 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7935 {
7936 	struct l2cap_chan_data *d = data;
7937 	struct pid *pid;
7938 
7939 	if (chan == d->chan)
7940 		return;
7941 
7942 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7943 		return;
7944 
7945 	pid = chan->ops->get_peer_pid(chan);
7946 
7947 	/* Only count deferred channels with the same PID/PSM */
7948 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7949 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7950 		return;
7951 
7952 	d->count++;
7953 }
7954 
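/* Initiate an outgoing connection on a channel: validate the PSM/CID and
 * channel mode, create (or reuse) the underlying ACL or LE link, attach
 * the channel to the connection and, if the link is already up, start
 * the L2CAP connect/configuration procedure.
 *
 * Rough usage sketch (hypothetical caller, error handling omitted): a
 * profile that has allocated a channel and set up chan->ops would call
 * something like
 *
 *	err = l2cap_chan_connect(chan, cpu_to_le16(psm), 0, &dst,
 *				 BDADDR_BREDR);
 */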
7955 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7956 		       bdaddr_t *dst, u8 dst_type)
7957 {
7958 	struct l2cap_conn *conn;
7959 	struct hci_conn *hcon;
7960 	struct hci_dev *hdev;
7961 	int err;
7962 
7963 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7964 	       dst, dst_type, __le16_to_cpu(psm), chan->mode);
7965 
7966 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7967 	if (!hdev)
7968 		return -EHOSTUNREACH;
7969 
7970 	hci_dev_lock(hdev);
7971 
7972 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7973 	    chan->chan_type != L2CAP_CHAN_RAW) {
7974 		err = -EINVAL;
7975 		goto done;
7976 	}
7977 
7978 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7979 		err = -EINVAL;
7980 		goto done;
7981 	}
7982 
7983 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7984 		err = -EINVAL;
7985 		goto done;
7986 	}
7987 
7988 	switch (chan->mode) {
7989 	case L2CAP_MODE_BASIC:
7990 		break;
7991 	case L2CAP_MODE_LE_FLOWCTL:
7992 		break;
7993 	case L2CAP_MODE_EXT_FLOWCTL:
7994 		if (!enable_ecred) {
7995 			err = -EOPNOTSUPP;
7996 			goto done;
7997 		}
7998 		break;
7999 	case L2CAP_MODE_ERTM:
8000 	case L2CAP_MODE_STREAMING:
8001 		if (!disable_ertm)
8002 			break;
8003 		fallthrough;
8004 	default:
8005 		err = -EOPNOTSUPP;
8006 		goto done;
8007 	}
8008 
8009 	switch (chan->state) {
8010 	case BT_CONNECT:
8011 	case BT_CONNECT2:
8012 	case BT_CONFIG:
8013 		/* Already connecting */
8014 		err = 0;
8015 		goto done;
8016 
8017 	case BT_CONNECTED:
8018 		/* Already connected */
8019 		err = -EISCONN;
8020 		goto done;
8021 
8022 	case BT_OPEN:
8023 	case BT_BOUND:
8024 		/* Can connect */
8025 		break;
8026 
8027 	default:
8028 		err = -EBADFD;
8029 		goto done;
8030 	}
8031 
8032 	/* Set destination address and psm */
8033 	bacpy(&chan->dst, dst);
8034 	chan->dst_type = dst_type;
8035 
8036 	chan->psm = psm;
8037 	chan->dcid = cid;
8038 
8039 	if (bdaddr_type_is_le(dst_type)) {
8040 		/* Convert from L2CAP channel address type to HCI address type
8041 		 */
8042 		if (dst_type == BDADDR_LE_PUBLIC)
8043 			dst_type = ADDR_LE_DEV_PUBLIC;
8044 		else
8045 			dst_type = ADDR_LE_DEV_RANDOM;
8046 
8047 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8048 			hcon = hci_connect_le(hdev, dst, dst_type, false,
8049 					      chan->sec_level,
8050 					      HCI_LE_CONN_TIMEOUT,
8051 					      HCI_ROLE_SLAVE);
8052 		else
8053 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
8054 						   chan->sec_level,
8055 						   HCI_LE_CONN_TIMEOUT,
8056 						   CONN_REASON_L2CAP_CHAN);
8057 
8058 	} else {
8059 		u8 auth_type = l2cap_get_auth_type(chan);
8060 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
8061 				       CONN_REASON_L2CAP_CHAN);
8062 	}
8063 
8064 	if (IS_ERR(hcon)) {
8065 		err = PTR_ERR(hcon);
8066 		goto done;
8067 	}
8068 
8069 	conn = l2cap_conn_add(hcon);
8070 	if (!conn) {
8071 		hci_conn_drop(hcon);
8072 		err = -ENOMEM;
8073 		goto done;
8074 	}
8075 
8076 	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
8077 		struct l2cap_chan_data data;
8078 
8079 		data.chan = chan;
8080 		data.pid = chan->ops->get_peer_pid(chan);
8081 		data.count = 1;
8082 
8083 		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
8084 
8085 		/* Check that there aren't too many channels being connected */
8086 		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
8087 			hci_conn_drop(hcon);
8088 			err = -EPROTO;
8089 			goto done;
8090 		}
8091 	}
8092 
8093 	mutex_lock(&conn->chan_lock);
8094 	l2cap_chan_lock(chan);
8095 
8096 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
8097 		hci_conn_drop(hcon);
8098 		err = -EBUSY;
8099 		goto chan_unlock;
8100 	}
8101 
8102 	/* Update source addr of the socket */
8103 	bacpy(&chan->src, &hcon->src);
8104 	chan->src_type = bdaddr_src_type(hcon);
8105 
8106 	__l2cap_chan_add(conn, chan);
8107 
8108 	/* l2cap_chan_add takes its own ref so we can drop this one */
8109 	hci_conn_drop(hcon);
8110 
8111 	l2cap_state_change(chan, BT_CONNECT);
8112 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
8113 
8114 	/* Release chan->sport so that it can be reused by other
8115 	 * sockets (as it's only used for listening sockets).
8116 	 */
8117 	write_lock(&chan_list_lock);
8118 	chan->sport = 0;
8119 	write_unlock(&chan_list_lock);
8120 
8121 	if (hcon->state == BT_CONNECTED) {
8122 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
8123 			__clear_chan_timer(chan);
8124 			if (l2cap_chan_check_security(chan, true))
8125 				l2cap_state_change(chan, BT_CONNECTED);
8126 		} else
8127 			l2cap_do_start(chan);
8128 	}
8129 
8130 	err = 0;
8131 
8132 chan_unlock:
8133 	l2cap_chan_unlock(chan);
8134 	mutex_unlock(&conn->chan_lock);
8135 done:
8136 	hci_dev_unlock(hdev);
8137 	hci_dev_put(hdev);
8138 	return err;
8139 }
8140 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8141 
8142 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8143 {
8144 	struct l2cap_conn *conn = chan->conn;
8145 	struct {
8146 		struct l2cap_ecred_reconf_req req;
8147 		__le16 scid;
8148 	} pdu;
8149 
8150 	pdu.req.mtu = cpu_to_le16(chan->imtu);
8151 	pdu.req.mps = cpu_to_le16(chan->mps);
8152 	pdu.scid    = cpu_to_le16(chan->scid);
8153 
8154 	chan->ident = l2cap_get_ident(conn);
8155 
8156 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8157 		       sizeof(pdu), &pdu);
8158 }
8159 
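/* Reconfigure the incoming MTU of an enhanced credit based channel.  The
 * new MTU may only grow the channel's imtu and is advertised to the peer
 * with an ECRED reconfigure request.
 */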
8160 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8161 {
8162 	if (chan->imtu > mtu)
8163 		return -EINVAL;
8164 
8165 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8166 
8167 	chan->imtu = mtu;
8168 
8169 	l2cap_ecred_reconfigure(chan);
8170 
8171 	return 0;
8172 }
8173 
8174 /* ---- L2CAP interface with lower layer (HCI) ---- */
8175 
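/* HCI callback used to decide whether an incoming BR/EDR connection
 * should be accepted: scan the listening channels, preferring an exact
 * local address match over wildcard (BDADDR_ANY) listeners.
 */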
8176 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8177 {
8178 	int exact = 0, lm1 = 0, lm2 = 0;
8179 	struct l2cap_chan *c;
8180 
8181 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8182 
8183 	/* Find listening sockets and check their link_mode */
8184 	read_lock(&chan_list_lock);
8185 	list_for_each_entry(c, &chan_list, global_l) {
8186 		if (c->state != BT_LISTEN)
8187 			continue;
8188 
8189 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8190 			lm1 |= HCI_LM_ACCEPT;
8191 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8192 				lm1 |= HCI_LM_MASTER;
8193 			exact++;
8194 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8195 			lm2 |= HCI_LM_ACCEPT;
8196 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8197 				lm2 |= HCI_LM_MASTER;
8198 		}
8199 	}
8200 	read_unlock(&chan_list_lock);
8201 
8202 	return exact ? lm1 : lm2;
8203 }
8204 
8205 /* Find the next fixed channel in BT_LISTEN state, continuing the
8206  * iteration from an existing channel in the list or from the beginning
8207  * of the global list (when NULL is passed as the first parameter).
8208  */
8209 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8210 						  struct hci_conn *hcon)
8211 {
8212 	u8 src_type = bdaddr_src_type(hcon);
8213 
8214 	read_lock(&chan_list_lock);
8215 
8216 	if (c)
8217 		c = list_next_entry(c, global_l);
8218 	else
8219 		c = list_entry(chan_list.next, typeof(*c), global_l);
8220 
8221 	list_for_each_entry_from(c, &chan_list, global_l) {
8222 		if (c->chan_type != L2CAP_CHAN_FIXED)
8223 			continue;
8224 		if (c->state != BT_LISTEN)
8225 			continue;
8226 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8227 			continue;
8228 		if (src_type != c->src_type)
8229 			continue;
8230 
8231 		c = l2cap_chan_hold_unless_zero(c);
8232 		read_unlock(&chan_list_lock);
8233 		return c;
8234 	}
8235 
8236 	read_unlock(&chan_list_lock);
8237 
8238 	return NULL;
8239 }
8240 
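/* HCI callback invoked when ACL or LE link setup completes.  On success
 * the L2CAP connection is created and every listening fixed channel
 * matching the local address is offered a new channel on this link; on
 * failure the connection is torn down.
 */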
8241 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8242 {
8243 	struct hci_dev *hdev = hcon->hdev;
8244 	struct l2cap_conn *conn;
8245 	struct l2cap_chan *pchan;
8246 	u8 dst_type;
8247 
8248 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8249 		return;
8250 
8251 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
8252 
8253 	if (status) {
8254 		l2cap_conn_del(hcon, bt_to_errno(status));
8255 		return;
8256 	}
8257 
8258 	conn = l2cap_conn_add(hcon);
8259 	if (!conn)
8260 		return;
8261 
8262 	dst_type = bdaddr_dst_type(hcon);
8263 
8264 	/* If device is blocked, do not create channels for it */
8265 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
8266 		return;
8267 
8268 	/* Find fixed channels and notify them of the new connection. We
8269 	 * use multiple individual lookups, continuing each time where
8270 	 * we left off, because the list lock would prevent calling the
8271 	 * potentially sleeping l2cap_chan_lock() function.
8272 	 */
8273 	pchan = l2cap_global_fixed_chan(NULL, hcon);
8274 	while (pchan) {
8275 		struct l2cap_chan *chan, *next;
8276 
8277 		/* Client fixed channels should override server ones */
8278 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8279 			goto next;
8280 
8281 		l2cap_chan_lock(pchan);
8282 		chan = pchan->ops->new_connection(pchan);
8283 		if (chan) {
8284 			bacpy(&chan->src, &hcon->src);
8285 			bacpy(&chan->dst, &hcon->dst);
8286 			chan->src_type = bdaddr_src_type(hcon);
8287 			chan->dst_type = dst_type;
8288 
8289 			__l2cap_chan_add(conn, chan);
8290 		}
8291 
8292 		l2cap_chan_unlock(pchan);
8293 next:
8294 		next = l2cap_global_fixed_chan(pchan, hcon);
8295 		l2cap_chan_put(pchan);
8296 		pchan = next;
8297 	}
8298 
8299 	l2cap_conn_ready(conn);
8300 }
8301 
8302 int l2cap_disconn_ind(struct hci_conn *hcon)
8303 {
8304 	struct l2cap_conn *conn = hcon->l2cap_data;
8305 
8306 	BT_DBG("hcon %p", hcon);
8307 
8308 	if (!conn)
8309 		return HCI_ERROR_REMOTE_USER_TERM;
8310 	return conn->disc_reason;
8311 }
8312 
8313 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8314 {
8315 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8316 		return;
8317 
8318 	BT_DBG("hcon %p reason %d", hcon, reason);
8319 
8320 	l2cap_conn_del(hcon, bt_to_errno(reason));
8321 }
8322 
8323 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8324 {
8325 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8326 		return;
8327 
8328 	if (encrypt == 0x00) {
8329 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8330 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8331 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8332 			   chan->sec_level == BT_SECURITY_FIPS)
8333 			l2cap_chan_close(chan, ECONNREFUSED);
8334 	} else {
8335 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8336 			__clear_chan_timer(chan);
8337 	}
8338 }
8339 
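/* HCI callback for authentication/encryption changes.  Depending on the
 * channel state this resumes data transfer, (re)starts the connection
 * procedure or answers a pending connect request, closing channels whose
 * security requirements can no longer be met.
 */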
8340 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8341 {
8342 	struct l2cap_conn *conn = hcon->l2cap_data;
8343 	struct l2cap_chan *chan;
8344 
8345 	if (!conn)
8346 		return;
8347 
8348 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8349 
8350 	mutex_lock(&conn->chan_lock);
8351 
8352 	list_for_each_entry(chan, &conn->chan_l, list) {
8353 		l2cap_chan_lock(chan);
8354 
8355 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8356 		       state_to_string(chan->state));
8357 
8358 		if (chan->scid == L2CAP_CID_A2MP) {
8359 			l2cap_chan_unlock(chan);
8360 			continue;
8361 		}
8362 
8363 		if (!status && encrypt)
8364 			chan->sec_level = hcon->sec_level;
8365 
8366 		if (!__l2cap_no_conn_pending(chan)) {
8367 			l2cap_chan_unlock(chan);
8368 			continue;
8369 		}
8370 
8371 		if (!status && (chan->state == BT_CONNECTED ||
8372 				chan->state == BT_CONFIG)) {
8373 			chan->ops->resume(chan);
8374 			l2cap_check_encryption(chan, encrypt);
8375 			l2cap_chan_unlock(chan);
8376 			continue;
8377 		}
8378 
8379 		if (chan->state == BT_CONNECT) {
8380 			if (!status && l2cap_check_enc_key_size(hcon))
8381 				l2cap_start_connection(chan);
8382 			else
8383 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8384 		} else if (chan->state == BT_CONNECT2 &&
8385 			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8386 			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8387 			struct l2cap_conn_rsp rsp;
8388 			__u16 res, stat;
8389 
8390 			if (!status && l2cap_check_enc_key_size(hcon)) {
8391 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8392 					res = L2CAP_CR_PEND;
8393 					stat = L2CAP_CS_AUTHOR_PEND;
8394 					chan->ops->defer(chan);
8395 				} else {
8396 					l2cap_state_change(chan, BT_CONFIG);
8397 					res = L2CAP_CR_SUCCESS;
8398 					stat = L2CAP_CS_NO_INFO;
8399 				}
8400 			} else {
8401 				l2cap_state_change(chan, BT_DISCONN);
8402 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8403 				res = L2CAP_CR_SEC_BLOCK;
8404 				stat = L2CAP_CS_NO_INFO;
8405 			}
8406 
8407 			rsp.scid   = cpu_to_le16(chan->dcid);
8408 			rsp.dcid   = cpu_to_le16(chan->scid);
8409 			rsp.result = cpu_to_le16(res);
8410 			rsp.status = cpu_to_le16(stat);
8411 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
8412 				       sizeof(rsp), &rsp);
8413 
8414 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8415 			    res == L2CAP_CR_SUCCESS) {
8416 				char buf[128];
8417 				set_bit(CONF_REQ_SENT, &chan->conf_state);
8418 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
8419 					       L2CAP_CONF_REQ,
8420 					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
8421 					       buf);
8422 				chan->num_conf_req++;
8423 			}
8424 		}
8425 
8426 		l2cap_chan_unlock(chan);
8427 	}
8428 
8429 	mutex_unlock(&conn->chan_lock);
8430 }
8431 
8432 /* Append fragment into frame respecting the maximum len of rx_skb */
8433 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
8434 			   u16 len)
8435 {
8436 	if (!conn->rx_skb) {
8437 		/* Allocate skb for the complete frame (with header) */
8438 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8439 		if (!conn->rx_skb)
8440 			return -ENOMEM;
8441 		/* Init rx_len */
8442 		conn->rx_len = len;
8443 	}
8444 
8445 	/* Copy as much as the rx_skb can hold */
8446 	len = min_t(u16, len, skb->len);
8447 	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
8448 	skb_pull(skb, len);
8449 	conn->rx_len -= len;
8450 
8451 	return len;
8452 }
8453 
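/* Complete the 2-byte L2CAP length field when it was split across ACL
 * fragments.  If the frame does not fit in the current rx_skb
 * allocation, rx_skb is reallocated to the exact expected length.
 */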
8454 static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
8455 {
8456 	struct sk_buff *rx_skb;
8457 	int len;
8458 
8459 	/* Append just enough to complete the header */
8460 	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);
8461 
8462 	/* If the header could not be read yet, just continue */
8463 	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
8464 		return len;
8465 
8466 	rx_skb = conn->rx_skb;
8467 	len = get_unaligned_le16(rx_skb->data);
8468 
8469 	/* Check if rx_skb has enough space to receive all fragments */
8470 	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
8471 		/* Update expected len */
8472 		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
8473 		return L2CAP_LEN_SIZE;
8474 	}
8475 
8476 	/* Reset conn->rx_skb since it will need to be reallocated in order to
8477 	 * fit all fragments.
8478 	 */
8479 	conn->rx_skb = NULL;
8480 
8481 	/* Reallocate rx_skb using the exact expected length */
8482 	len = l2cap_recv_frag(conn, rx_skb,
8483 			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
8484 	kfree_skb(rx_skb);
8485 
8486 	return len;
8487 }
8488 
8489 static void l2cap_recv_reset(struct l2cap_conn *conn)
8490 {
8491 	kfree_skb(conn->rx_skb);
8492 	conn->rx_skb = NULL;
8493 	conn->rx_len = 0;
8494 }
8495 
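/* Receive ACL data from the HCI layer: a start fragment is either
 * processed directly (complete frame) or buffered, and continuation
 * fragments are appended until the full L2CAP frame has been reassembled
 * and can be passed to l2cap_recv_frame().
 */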
8496 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8497 {
8498 	struct l2cap_conn *conn = hcon->l2cap_data;
8499 	int len;
8500 
8501 	/* For AMP controller do not create l2cap conn */
8502 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8503 		goto drop;
8504 
8505 	if (!conn)
8506 		conn = l2cap_conn_add(hcon);
8507 
8508 	if (!conn)
8509 		goto drop;
8510 
8511 	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
8512 
8513 	switch (flags) {
8514 	case ACL_START:
8515 	case ACL_START_NO_FLUSH:
8516 	case ACL_COMPLETE:
8517 		if (conn->rx_skb) {
8518 			BT_ERR("Unexpected start frame (len %d)", skb->len);
8519 			l2cap_recv_reset(conn);
8520 			l2cap_conn_unreliable(conn, ECOMM);
8521 		}
8522 
8523 		/* A start fragment may not contain the full L2CAP length field,
8524 		 * so just copy what is available when that happens and use
8525 		 * conn->mtu as the expected length for now.
8526 		 */
8527 		if (skb->len < L2CAP_LEN_SIZE) {
8528 			l2cap_recv_frag(conn, skb, conn->mtu);
8529 			break;
8530 		}
8531 
8532 		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
8533 
8534 		if (len == skb->len) {
8535 			/* Complete frame received */
8536 			l2cap_recv_frame(conn, skb);
8537 			return;
8538 		}
8539 
8540 		BT_DBG("Start: total len %d, frag len %u", len, skb->len);
8541 
8542 		if (skb->len > len) {
8543 			BT_ERR("Frame is too long (len %u, expected len %d)",
8544 			       skb->len, len);
8545 			l2cap_conn_unreliable(conn, ECOMM);
8546 			goto drop;
8547 		}
8548 
8549 		/* Append fragment into frame (with header) */
8550 		if (l2cap_recv_frag(conn, skb, len) < 0)
8551 			goto drop;
8552 
8553 		break;
8554 
8555 	case ACL_CONT:
8556 		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);
8557 
8558 		if (!conn->rx_skb) {
8559 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8560 			l2cap_conn_unreliable(conn, ECOMM);
8561 			goto drop;
8562 		}
8563 
8564 		/* Complete the L2CAP length if it has not been read */
8565 		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
8566 			if (l2cap_recv_len(conn, skb) < 0) {
8567 				l2cap_conn_unreliable(conn, ECOMM);
8568 				goto drop;
8569 			}
8570 
8571 			/* Header still could not be read, just continue */
8572 			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
8573 				break;
8574 		}
8575 
8576 		if (skb->len > conn->rx_len) {
8577 			BT_ERR("Fragment is too long (len %u, expected %u)",
8578 			       skb->len, conn->rx_len);
8579 			l2cap_recv_reset(conn);
8580 			l2cap_conn_unreliable(conn, ECOMM);
8581 			goto drop;
8582 		}
8583 
8584 		/* Append fragment into frame (with header) */
8585 		l2cap_recv_frag(conn, skb, skb->len);
8586 
8587 		if (!conn->rx_len) {
8588 			/* Complete frame received. l2cap_recv_frame
8589 			 * takes ownership of the skb so set the global
8590 			 * rx_skb pointer to NULL first.
8591 			 */
8592 			struct sk_buff *rx_skb = conn->rx_skb;
8593 			conn->rx_skb = NULL;
8594 			l2cap_recv_frame(conn, rx_skb);
8595 		}
8596 		break;
8597 	}
8598 
8599 drop:
8600 	kfree_skb(skb);
8601 }
8602 
8603 static struct hci_cb l2cap_cb = {
8604 	.name		= "L2CAP",
8605 	.connect_cfm	= l2cap_connect_cfm,
8606 	.disconn_cfm	= l2cap_disconn_cfm,
8607 	.security_cfm	= l2cap_security_cfm,
8608 };
8609 
8610 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8611 {
8612 	struct l2cap_chan *c;
8613 
8614 	read_lock(&chan_list_lock);
8615 
8616 	list_for_each_entry(c, &chan_list, global_l) {
8617 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8618 			   &c->src, c->src_type, &c->dst, c->dst_type,
8619 			   c->state, __le16_to_cpu(c->psm),
8620 			   c->scid, c->dcid, c->imtu, c->omtu,
8621 			   c->sec_level, c->mode);
8622 	}
8623 
8624 	read_unlock(&chan_list_lock);
8625 
8626 	return 0;
8627 }
8628 
8629 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
8630 
8631 static struct dentry *l2cap_debugfs;
8632 
8633 int __init l2cap_init(void)
8634 {
8635 	int err;
8636 
8637 	err = l2cap_init_sockets();
8638 	if (err < 0)
8639 		return err;
8640 
8641 	hci_register_cb(&l2cap_cb);
8642 
8643 	if (IS_ERR_OR_NULL(bt_debugfs))
8644 		return 0;
8645 
8646 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8647 					    NULL, &l2cap_debugfs_fops);
8648 
8649 	return 0;
8650 }
8651 
8652 void l2cap_exit(void)
8653 {
8654 	debugfs_remove(l2cap_debugfs);
8655 	hci_unregister_cb(&l2cap_cb);
8656 	l2cap_cleanup_sockets();
8657 }
8658 
8659 module_param(disable_ertm, bool, 0644);
8660 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8661 
8662 module_param(enable_ecred, bool, 0644);
8663 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8664