1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 static void l2cap_retrans_timeout(struct work_struct *work);
65 static void l2cap_monitor_timeout(struct work_struct *work);
66 static void l2cap_ack_timeout(struct work_struct *work);
67 
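/* Map an HCI link type and address type onto the corresponding bdaddr
 * type: LE links resolve to BDADDR_LE_PUBLIC or BDADDR_LE_RANDOM, every
 * other link type is treated as BR/EDR.
 */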
68 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 {
70 	if (link_type == LE_LINK) {
71 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
72 			return BDADDR_LE_PUBLIC;
73 		else
74 			return BDADDR_LE_RANDOM;
75 	}
76 
77 	return BDADDR_BREDR;
78 }
79 
80 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
81 {
82 	return bdaddr_type(hcon->type, hcon->src_type);
83 }
84 
85 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
86 {
87 	return bdaddr_type(hcon->type, hcon->dst_type);
88 }
89 
90 /* ---- L2CAP channels ---- */
91 
92 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
93 						   u16 cid)
94 {
95 	struct l2cap_chan *c;
96 
97 	list_for_each_entry(c, &conn->chan_l, list) {
98 		if (c->dcid == cid)
99 			return c;
100 	}
101 	return NULL;
102 }
103 
104 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 						   u16 cid)
106 {
107 	struct l2cap_chan *c;
108 
109 	list_for_each_entry(c, &conn->chan_l, list) {
110 		if (c->scid == cid)
111 			return c;
112 	}
113 	return NULL;
114 }
115 
116 /* Find channel with given SCID.
117  * Returns a reference locked channel.
118  */
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
120 						 u16 cid)
121 {
122 	struct l2cap_chan *c;
123 
124 	mutex_lock(&conn->chan_lock);
125 	c = __l2cap_get_chan_by_scid(conn, cid);
126 	if (c) {
127 		/* Only lock if chan reference is not 0 */
128 		c = l2cap_chan_hold_unless_zero(c);
129 		if (c)
130 			l2cap_chan_lock(c);
131 	}
132 	mutex_unlock(&conn->chan_lock);
133 
134 	return c;
135 }
136 
137 /* Find channel with given DCID.
138  * Returns a reference locked channel.
139  */
140 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
141 						 u16 cid)
142 {
143 	struct l2cap_chan *c;
144 
145 	mutex_lock(&conn->chan_lock);
146 	c = __l2cap_get_chan_by_dcid(conn, cid);
147 	if (c) {
148 		/* Only lock if chan reference is not 0 */
149 		c = l2cap_chan_hold_unless_zero(c);
150 		if (c)
151 			l2cap_chan_lock(c);
152 	}
153 	mutex_unlock(&conn->chan_lock);
154 
155 	return c;
156 }
157 
158 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
159 						    u8 ident)
160 {
161 	struct l2cap_chan *c;
162 
163 	list_for_each_entry(c, &conn->chan_l, list) {
164 		if (c->ident == ident)
165 			return c;
166 	}
167 	return NULL;
168 }
169 
170 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
171 						  u8 ident)
172 {
173 	struct l2cap_chan *c;
174 
175 	mutex_lock(&conn->chan_lock);
176 	c = __l2cap_get_chan_by_ident(conn, ident);
177 	if (c) {
178 		/* Only lock if chan reference is not 0 */
179 		c = l2cap_chan_hold_unless_zero(c);
180 		if (c)
181 			l2cap_chan_lock(c);
182 	}
183 	mutex_unlock(&conn->chan_lock);
184 
185 	return c;
186 }
187 
188 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
189 						      u8 src_type)
190 {
191 	struct l2cap_chan *c;
192 
193 	list_for_each_entry(c, &chan_list, global_l) {
194 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
195 			continue;
196 
197 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
198 			continue;
199 
200 		if (c->sport == psm && !bacmp(&c->src, src))
201 			return c;
202 	}
203 	return NULL;
204 }
205 
206 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
207 {
208 	int err;
209 
210 	write_lock(&chan_list_lock);
211 
212 	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
213 		err = -EADDRINUSE;
214 		goto done;
215 	}
216 
217 	if (psm) {
218 		chan->psm = psm;
219 		chan->sport = psm;
220 		err = 0;
221 	} else {
222 		u16 p, start, end, incr;
223 
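		/* Valid BR/EDR PSMs are odd (least significant bit of the
		 * least significant octet set), so that dynamic range is
		 * walked in steps of two; LE dynamic PSMs have no such
		 * restriction and are tried one by one.
		 */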
224 		if (chan->src_type == BDADDR_BREDR) {
225 			start = L2CAP_PSM_DYN_START;
226 			end = L2CAP_PSM_AUTO_END;
227 			incr = 2;
228 		} else {
229 			start = L2CAP_PSM_LE_DYN_START;
230 			end = L2CAP_PSM_LE_DYN_END;
231 			incr = 1;
232 		}
233 
234 		err = -EINVAL;
235 		for (p = start; p <= end; p += incr)
236 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
237 							 chan->src_type)) {
238 				chan->psm   = cpu_to_le16(p);
239 				chan->sport = cpu_to_le16(p);
240 				err = 0;
241 				break;
242 			}
243 	}
244 
245 done:
246 	write_unlock(&chan_list_lock);
247 	return err;
248 }
249 EXPORT_SYMBOL_GPL(l2cap_add_psm);
250 
251 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
252 {
253 	write_lock(&chan_list_lock);
254 
255 	/* Override the defaults (which are for conn-oriented) */
256 	chan->omtu = L2CAP_DEFAULT_MTU;
257 	chan->chan_type = L2CAP_CHAN_FIXED;
258 
259 	chan->scid = scid;
260 
261 	write_unlock(&chan_list_lock);
262 
263 	return 0;
264 }
265 
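/* Pick the first unused dynamic CID on this connection, or return 0 if
 * the dynamic range (which is smaller on LE links) is exhausted.
 */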
266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
267 {
268 	u16 cid, dyn_end;
269 
270 	if (conn->hcon->type == LE_LINK)
271 		dyn_end = L2CAP_CID_LE_DYN_END;
272 	else
273 		dyn_end = L2CAP_CID_DYN_END;
274 
275 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 		if (!__l2cap_get_chan_by_scid(conn, cid))
277 			return cid;
278 	}
279 
280 	return 0;
281 }
282 
283 static void l2cap_state_change(struct l2cap_chan *chan, int state)
284 {
285 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
286 	       state_to_string(state));
287 
288 	chan->state = state;
289 	chan->ops->state_change(chan, state, 0);
290 }
291 
292 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
293 						int state, int err)
294 {
295 	chan->state = state;
296 	chan->ops->state_change(chan, chan->state, err);
297 }
298 
299 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
300 {
301 	chan->ops->state_change(chan, chan->state, err);
302 }
303 
304 static void __set_retrans_timer(struct l2cap_chan *chan)
305 {
306 	if (!delayed_work_pending(&chan->monitor_timer) &&
307 	    chan->retrans_timeout) {
308 		l2cap_set_timer(chan, &chan->retrans_timer,
309 				msecs_to_jiffies(chan->retrans_timeout));
310 	}
311 }
312 
313 static void __set_monitor_timer(struct l2cap_chan *chan)
314 {
315 	__clear_retrans_timer(chan);
316 	if (chan->monitor_timeout) {
317 		l2cap_set_timer(chan, &chan->monitor_timer,
318 				msecs_to_jiffies(chan->monitor_timeout));
319 	}
320 }
321 
322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
323 					       u16 seq)
324 {
325 	struct sk_buff *skb;
326 
327 	skb_queue_walk(head, skb) {
328 		if (bt_cb(skb)->l2cap.txseq == seq)
329 			return skb;
330 	}
331 
332 	return NULL;
333 }
334 
335 /* ---- L2CAP sequence number lists ---- */
336 
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338  * SREJ requests that are received and for frames that are to be
339  * retransmitted. These seq_list functions implement a singly-linked
340  * list in an array, where membership in the list can also be checked
341  * in constant time. Items can also be added to the tail of the list
342  * and removed from the head in constant time, without further memory
343  * allocs or frees.
344  */
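
/* Illustrative example (not part of the original source): with size 10
 * the backing array is rounded up to 16 entries (mask 0x000f). Appending
 * sequence numbers 3 and then 7 gives head = 3, tail = 7, list[3] = 7 and
 * list[7] = L2CAP_SEQ_LIST_TAIL; a subsequent pop returns 3 and advances
 * head to 7.
 */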
345 
346 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
347 {
348 	size_t alloc_size, i;
349 
350 	/* Allocated size is a power of 2 to map sequence numbers
351 	 * (which may be up to 14 bits) in to a smaller array that is
352 	 * sized for the negotiated ERTM transmit windows.
353 	 */
354 	alloc_size = roundup_pow_of_two(size);
355 
356 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
357 	if (!seq_list->list)
358 		return -ENOMEM;
359 
360 	seq_list->mask = alloc_size - 1;
361 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 	for (i = 0; i < alloc_size; i++)
364 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
365 
366 	return 0;
367 }
368 
369 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
370 {
371 	kfree(seq_list->list);
372 }
373 
374 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
375 					   u16 seq)
376 {
377 	/* Constant-time check for list membership */
378 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
379 }
380 
381 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
382 {
383 	u16 seq = seq_list->head;
384 	u16 mask = seq_list->mask;
385 
386 	seq_list->head = seq_list->list[seq & mask];
387 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
388 
389 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
390 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
391 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
392 	}
393 
394 	return seq;
395 }
396 
397 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
398 {
399 	u16 i;
400 
401 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
402 		return;
403 
404 	for (i = 0; i <= seq_list->mask; i++)
405 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
406 
407 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
408 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
409 }
410 
411 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
412 {
413 	u16 mask = seq_list->mask;
414 
415 	/* All appends happen in constant time */
416 
417 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
418 		return;
419 
420 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
421 		seq_list->head = seq;
422 	else
423 		seq_list->list[seq_list->tail & mask] = seq;
424 
425 	seq_list->tail = seq;
426 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
427 }
428 
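/* Work item run when the channel timer expires: close the channel with
 * ECONNREFUSED or ETIMEDOUT depending on its state and drop the
 * reference taken by __set_chan_timer().
 */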
429 static void l2cap_chan_timeout(struct work_struct *work)
430 {
431 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
432 					       chan_timer.work);
433 	struct l2cap_conn *conn = chan->conn;
434 	int reason;
435 
436 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
437 
438 	mutex_lock(&conn->chan_lock);
439 	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
440 	 * this work. No need to call l2cap_chan_hold(chan) here again.
441 	 */
442 	l2cap_chan_lock(chan);
443 
444 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
445 		reason = ECONNREFUSED;
446 	else if (chan->state == BT_CONNECT &&
447 		 chan->sec_level != BT_SECURITY_SDP)
448 		reason = ECONNREFUSED;
449 	else
450 		reason = ETIMEDOUT;
451 
452 	l2cap_chan_close(chan, reason);
453 
454 	chan->ops->close(chan);
455 
456 	l2cap_chan_unlock(chan);
457 	l2cap_chan_put(chan);
458 
459 	mutex_unlock(&conn->chan_lock);
460 }
461 
462 struct l2cap_chan *l2cap_chan_create(void)
463 {
464 	struct l2cap_chan *chan;
465 
466 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
467 	if (!chan)
468 		return NULL;
469 
470 	skb_queue_head_init(&chan->tx_q);
471 	skb_queue_head_init(&chan->srej_q);
472 	mutex_init(&chan->lock);
473 
474 	/* Set default lock nesting level */
475 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
476 
477 	write_lock(&chan_list_lock);
478 	list_add(&chan->global_l, &chan_list);
479 	write_unlock(&chan_list_lock);
480 
481 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
482 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
483 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
484 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
485 
486 	chan->state = BT_OPEN;
487 
488 	kref_init(&chan->kref);
489 
490 	/* This flag is cleared in l2cap_chan_ready() */
491 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
492 
493 	BT_DBG("chan %p", chan);
494 
495 	return chan;
496 }
497 EXPORT_SYMBOL_GPL(l2cap_chan_create);
498 
499 static void l2cap_chan_destroy(struct kref *kref)
500 {
501 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
502 
503 	BT_DBG("chan %p", chan);
504 
505 	write_lock(&chan_list_lock);
506 	list_del(&chan->global_l);
507 	write_unlock(&chan_list_lock);
508 
509 	kfree(chan);
510 }
511 
512 void l2cap_chan_hold(struct l2cap_chan *c)
513 {
514 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
515 
516 	kref_get(&c->kref);
517 }
518 
519 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
520 {
521 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
522 
523 	if (!kref_get_unless_zero(&c->kref))
524 		return NULL;
525 
526 	return c;
527 }
528 
529 void l2cap_chan_put(struct l2cap_chan *c)
530 {
531 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
532 
533 	kref_put(&c->kref, l2cap_chan_destroy);
534 }
535 EXPORT_SYMBOL_GPL(l2cap_chan_put);
536 
537 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
538 {
539 	chan->fcs  = L2CAP_FCS_CRC16;
540 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
541 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
542 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
543 	chan->remote_max_tx = chan->max_tx;
544 	chan->remote_tx_win = chan->tx_win;
545 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
546 	chan->sec_level = BT_SECURITY_LOW;
547 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
548 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
549 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
550 
551 	chan->conf_state = 0;
552 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
553 
554 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
555 }
556 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
557 
558 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
559 {
560 	chan->sdu = NULL;
561 	chan->sdu_last_frag = NULL;
562 	chan->sdu_len = 0;
563 	chan->tx_credits = tx_credits;
564 	/* Derive MPS from connection MTU to stop HCI fragmentation */
565 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
566 	/* Give enough credits for a full packet */
567 	chan->rx_credits = (chan->imtu / chan->mps) + 1;
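	/* Example with illustrative values: an imtu of 512 and a connection
	 * MTU of 251 give mps = min(512, 247) = 247 and
	 * rx_credits = 512 / 247 + 1 = 3, enough credits to receive one
	 * full-size SDU.
	 */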
568 
569 	skb_queue_head_init(&chan->tx_q);
570 }
571 
572 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
573 {
574 	l2cap_le_flowctl_init(chan, tx_credits);
575 
576 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
577 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
578 		chan->mps = L2CAP_ECRED_MIN_MPS;
579 		chan->rx_credits = (chan->imtu / chan->mps) + 1;
580 	}
581 }
582 
583 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
584 {
585 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
586 	       __le16_to_cpu(chan->psm), chan->dcid);
587 
588 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
589 
590 	chan->conn = conn;
591 
592 	switch (chan->chan_type) {
593 	case L2CAP_CHAN_CONN_ORIENTED:
594 		/* Alloc CID for connection-oriented socket */
595 		chan->scid = l2cap_alloc_cid(conn);
596 		if (conn->hcon->type == ACL_LINK)
597 			chan->omtu = L2CAP_DEFAULT_MTU;
598 		break;
599 
600 	case L2CAP_CHAN_CONN_LESS:
601 		/* Connectionless socket */
602 		chan->scid = L2CAP_CID_CONN_LESS;
603 		chan->dcid = L2CAP_CID_CONN_LESS;
604 		chan->omtu = L2CAP_DEFAULT_MTU;
605 		break;
606 
607 	case L2CAP_CHAN_FIXED:
608 		/* Caller will set CID and CID specific MTU values */
609 		break;
610 
611 	default:
612 		/* Raw socket can send/recv signalling messages only */
613 		chan->scid = L2CAP_CID_SIGNALING;
614 		chan->dcid = L2CAP_CID_SIGNALING;
615 		chan->omtu = L2CAP_DEFAULT_MTU;
616 	}
617 
618 	chan->local_id		= L2CAP_BESTEFFORT_ID;
619 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
620 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
621 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
622 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
623 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
624 
625 	l2cap_chan_hold(chan);
626 
627 	/* Only keep a reference for fixed channels if they requested it */
628 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
629 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
630 		hci_conn_hold(conn->hcon);
631 
632 	list_add(&chan->list, &conn->chan_l);
633 }
634 
635 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
636 {
637 	mutex_lock(&conn->chan_lock);
638 	__l2cap_chan_add(conn, chan);
639 	mutex_unlock(&conn->chan_lock);
640 }
641 
642 void l2cap_chan_del(struct l2cap_chan *chan, int err)
643 {
644 	struct l2cap_conn *conn = chan->conn;
645 
646 	__clear_chan_timer(chan);
647 
648 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
649 	       state_to_string(chan->state));
650 
651 	chan->ops->teardown(chan, err);
652 
653 	if (conn) {
654 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
655 		/* Delete from channel list */
656 		list_del(&chan->list);
657 
658 		l2cap_chan_put(chan);
659 
660 		chan->conn = NULL;
661 
662 		/* Reference was only held for non-fixed channels or
663 		 * fixed channels that explicitly requested it using the
664 		 * FLAG_HOLD_HCI_CONN flag.
665 		 */
666 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
667 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
668 			hci_conn_drop(conn->hcon);
669 
670 		if (mgr && mgr->bredr_chan == chan)
671 			mgr->bredr_chan = NULL;
672 	}
673 
674 	if (chan->hs_hchan) {
675 		struct hci_chan *hs_hchan = chan->hs_hchan;
676 
677 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
678 		amp_disconnect_logical_link(hs_hchan);
679 	}
680 
681 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
682 		return;
683 
684 	switch (chan->mode) {
685 	case L2CAP_MODE_BASIC:
686 		break;
687 
688 	case L2CAP_MODE_LE_FLOWCTL:
689 	case L2CAP_MODE_EXT_FLOWCTL:
690 		skb_queue_purge(&chan->tx_q);
691 		break;
692 
693 	case L2CAP_MODE_ERTM:
694 		__clear_retrans_timer(chan);
695 		__clear_monitor_timer(chan);
696 		__clear_ack_timer(chan);
697 
698 		skb_queue_purge(&chan->srej_q);
699 
700 		l2cap_seq_list_free(&chan->srej_list);
701 		l2cap_seq_list_free(&chan->retrans_list);
702 		fallthrough;
703 
704 	case L2CAP_MODE_STREAMING:
705 		skb_queue_purge(&chan->tx_q);
706 		break;
707 	}
708 }
709 EXPORT_SYMBOL_GPL(l2cap_chan_del);
710 
711 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
712 			      void *data)
713 {
714 	struct l2cap_chan *chan;
715 
716 	list_for_each_entry(chan, &conn->chan_l, list) {
717 		func(chan, data);
718 	}
719 }
720 
721 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
722 		     void *data)
723 {
724 	if (!conn)
725 		return;
726 
727 	mutex_lock(&conn->chan_lock);
728 	__l2cap_chan_list(conn, func, data);
729 	mutex_unlock(&conn->chan_lock);
730 }
731 
732 EXPORT_SYMBOL_GPL(l2cap_chan_list);
733 
734 static void l2cap_conn_update_id_addr(struct work_struct *work)
735 {
736 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
737 					       id_addr_update_work);
738 	struct hci_conn *hcon = conn->hcon;
739 	struct l2cap_chan *chan;
740 
741 	mutex_lock(&conn->chan_lock);
742 
743 	list_for_each_entry(chan, &conn->chan_l, list) {
744 		l2cap_chan_lock(chan);
745 		bacpy(&chan->dst, &hcon->dst);
746 		chan->dst_type = bdaddr_dst_type(hcon);
747 		l2cap_chan_unlock(chan);
748 	}
749 
750 	mutex_unlock(&conn->chan_lock);
751 }
752 
753 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
754 {
755 	struct l2cap_conn *conn = chan->conn;
756 	struct l2cap_le_conn_rsp rsp;
757 	u16 result;
758 
759 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
760 		result = L2CAP_CR_LE_AUTHORIZATION;
761 	else
762 		result = L2CAP_CR_LE_BAD_PSM;
763 
764 	l2cap_state_change(chan, BT_DISCONN);
765 
766 	rsp.dcid    = cpu_to_le16(chan->scid);
767 	rsp.mtu     = cpu_to_le16(chan->imtu);
768 	rsp.mps     = cpu_to_le16(chan->mps);
769 	rsp.credits = cpu_to_le16(chan->rx_credits);
770 	rsp.result  = cpu_to_le16(result);
771 
772 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
773 		       &rsp);
774 }
775 
776 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
777 {
778 	struct l2cap_conn *conn = chan->conn;
779 	struct l2cap_ecred_conn_rsp rsp;
780 	u16 result;
781 
782 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
783 		result = L2CAP_CR_LE_AUTHORIZATION;
784 	else
785 		result = L2CAP_CR_LE_BAD_PSM;
786 
787 	l2cap_state_change(chan, BT_DISCONN);
788 
789 	memset(&rsp, 0, sizeof(rsp));
790 
791 	rsp.result  = cpu_to_le16(result);
792 
793 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
794 		       &rsp);
795 }
796 
797 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
798 {
799 	struct l2cap_conn *conn = chan->conn;
800 	struct l2cap_conn_rsp rsp;
801 	u16 result;
802 
803 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
804 		result = L2CAP_CR_SEC_BLOCK;
805 	else
806 		result = L2CAP_CR_BAD_PSM;
807 
808 	l2cap_state_change(chan, BT_DISCONN);
809 
810 	rsp.scid   = cpu_to_le16(chan->dcid);
811 	rsp.dcid   = cpu_to_le16(chan->scid);
812 	rsp.result = cpu_to_le16(result);
813 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
814 
815 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
816 }
817 
818 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
819 {
820 	struct l2cap_conn *conn = chan->conn;
821 
822 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
823 
824 	switch (chan->state) {
825 	case BT_LISTEN:
826 		chan->ops->teardown(chan, 0);
827 		break;
828 
829 	case BT_CONNECTED:
830 	case BT_CONFIG:
831 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
832 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
833 			l2cap_send_disconn_req(chan, reason);
834 		} else
835 			l2cap_chan_del(chan, reason);
836 		break;
837 
838 	case BT_CONNECT2:
839 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
840 			if (conn->hcon->type == ACL_LINK)
841 				l2cap_chan_connect_reject(chan);
842 			else if (conn->hcon->type == LE_LINK) {
843 				switch (chan->mode) {
844 				case L2CAP_MODE_LE_FLOWCTL:
845 					l2cap_chan_le_connect_reject(chan);
846 					break;
847 				case L2CAP_MODE_EXT_FLOWCTL:
848 					l2cap_chan_ecred_connect_reject(chan);
849 					break;
850 				}
851 			}
852 		}
853 
854 		l2cap_chan_del(chan, reason);
855 		break;
856 
857 	case BT_CONNECT:
858 	case BT_DISCONN:
859 		l2cap_chan_del(chan, reason);
860 		break;
861 
862 	default:
863 		chan->ops->teardown(chan, 0);
864 		break;
865 	}
866 }
867 EXPORT_SYMBOL(l2cap_chan_close);
868 
869 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
870 {
871 	switch (chan->chan_type) {
872 	case L2CAP_CHAN_RAW:
873 		switch (chan->sec_level) {
874 		case BT_SECURITY_HIGH:
875 		case BT_SECURITY_FIPS:
876 			return HCI_AT_DEDICATED_BONDING_MITM;
877 		case BT_SECURITY_MEDIUM:
878 			return HCI_AT_DEDICATED_BONDING;
879 		default:
880 			return HCI_AT_NO_BONDING;
881 		}
882 		break;
883 	case L2CAP_CHAN_CONN_LESS:
884 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
885 			if (chan->sec_level == BT_SECURITY_LOW)
886 				chan->sec_level = BT_SECURITY_SDP;
887 		}
888 		if (chan->sec_level == BT_SECURITY_HIGH ||
889 		    chan->sec_level == BT_SECURITY_FIPS)
890 			return HCI_AT_NO_BONDING_MITM;
891 		else
892 			return HCI_AT_NO_BONDING;
893 		break;
894 	case L2CAP_CHAN_CONN_ORIENTED:
895 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
896 			if (chan->sec_level == BT_SECURITY_LOW)
897 				chan->sec_level = BT_SECURITY_SDP;
898 
899 			if (chan->sec_level == BT_SECURITY_HIGH ||
900 			    chan->sec_level == BT_SECURITY_FIPS)
901 				return HCI_AT_NO_BONDING_MITM;
902 			else
903 				return HCI_AT_NO_BONDING;
904 		}
905 		fallthrough;
906 
907 	default:
908 		switch (chan->sec_level) {
909 		case BT_SECURITY_HIGH:
910 		case BT_SECURITY_FIPS:
911 			return HCI_AT_GENERAL_BONDING_MITM;
912 		case BT_SECURITY_MEDIUM:
913 			return HCI_AT_GENERAL_BONDING;
914 		default:
915 			return HCI_AT_NO_BONDING;
916 		}
917 		break;
918 	}
919 }
920 
921 /* Service level security */
922 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
923 {
924 	struct l2cap_conn *conn = chan->conn;
925 	__u8 auth_type;
926 
927 	if (conn->hcon->type == LE_LINK)
928 		return smp_conn_security(conn->hcon, chan->sec_level);
929 
930 	auth_type = l2cap_get_auth_type(chan);
931 
932 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
933 				 initiator);
934 }
935 
936 static u8 l2cap_get_ident(struct l2cap_conn *conn)
937 {
938 	u8 id;
939 
940 	/* Get next available identifier.
941 	 *    1 - 128 are used by kernel.
942 	 *  129 - 199 are reserved.
943 	 *  200 - 254 are used by utilities like l2ping, etc.
944 	 */
945 
946 	mutex_lock(&conn->ident_lock);
947 
948 	if (++conn->tx_ident > 128)
949 		conn->tx_ident = 1;
950 
951 	id = conn->tx_ident;
952 
953 	mutex_unlock(&conn->ident_lock);
954 
955 	return id;
956 }
957 
958 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
959 			   void *data)
960 {
961 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
962 	u8 flags;
963 
964 	BT_DBG("code 0x%2.2x", code);
965 
966 	if (!skb)
967 		return;
968 
969 	/* Use NO_FLUSH if supported or we have an LE link (which does
970 	 * not support auto-flushing packets) */
971 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
972 	    conn->hcon->type == LE_LINK)
973 		flags = ACL_START_NO_FLUSH;
974 	else
975 		flags = ACL_START;
976 
977 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
978 	skb->priority = HCI_PRIO_MAX;
979 
980 	hci_send_acl(conn->hchan, skb, flags);
981 }
982 
983 static bool __chan_is_moving(struct l2cap_chan *chan)
984 {
985 	return chan->move_state != L2CAP_MOVE_STABLE &&
986 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
987 }
988 
989 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
990 {
991 	struct hci_conn *hcon = chan->conn->hcon;
992 	u16 flags;
993 
994 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
995 	       skb->priority);
996 
997 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
998 		if (chan->hs_hchan)
999 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
1000 		else
1001 			kfree_skb(skb);
1002 
1003 		return;
1004 	}
1005 
1006 	/* Use NO_FLUSH for LE links (where this is the only option) or
1007 	 * if the BR/EDR link supports it and flushing has not been
1008 	 * explicitly requested (through FLAG_FLUSHABLE).
1009 	 */
1010 	if (hcon->type == LE_LINK ||
1011 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1012 	     lmp_no_flush_capable(hcon->hdev)))
1013 		flags = ACL_START_NO_FLUSH;
1014 	else
1015 		flags = ACL_START;
1016 
1017 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1018 	hci_send_acl(chan->conn->hchan, skb, flags);
1019 }
1020 
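/* The helpers below convert between the on-air control field (16-bit
 * enhanced or 32-bit extended, selected by FLAG_EXT_CTRL) and the
 * unpacked struct l2cap_ctrl representation used by the ERTM state
 * machine.
 */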
1021 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1022 {
1023 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1024 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1025 
1026 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1027 		/* S-Frame */
1028 		control->sframe = 1;
1029 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1030 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1031 
1032 		control->sar = 0;
1033 		control->txseq = 0;
1034 	} else {
1035 		/* I-Frame */
1036 		control->sframe = 0;
1037 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1038 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1039 
1040 		control->poll = 0;
1041 		control->super = 0;
1042 	}
1043 }
1044 
1045 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1046 {
1047 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1048 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1049 
1050 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1051 		/* S-Frame */
1052 		control->sframe = 1;
1053 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1054 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1055 
1056 		control->sar = 0;
1057 		control->txseq = 0;
1058 	} else {
1059 		/* I-Frame */
1060 		control->sframe = 0;
1061 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1062 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1063 
1064 		control->poll = 0;
1065 		control->super = 0;
1066 	}
1067 }
1068 
1069 static inline void __unpack_control(struct l2cap_chan *chan,
1070 				    struct sk_buff *skb)
1071 {
1072 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1073 		__unpack_extended_control(get_unaligned_le32(skb->data),
1074 					  &bt_cb(skb)->l2cap);
1075 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1076 	} else {
1077 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1078 					  &bt_cb(skb)->l2cap);
1079 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1080 	}
1081 }
1082 
1083 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1084 {
1085 	u32 packed;
1086 
1087 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1088 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1089 
1090 	if (control->sframe) {
1091 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1092 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1093 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1094 	} else {
1095 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1096 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1097 	}
1098 
1099 	return packed;
1100 }
1101 
1102 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1103 {
1104 	u16 packed;
1105 
1106 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1107 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1108 
1109 	if (control->sframe) {
1110 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1111 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1112 		packed |= L2CAP_CTRL_FRAME_TYPE;
1113 	} else {
1114 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1115 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1116 	}
1117 
1118 	return packed;
1119 }
1120 
1121 static inline void __pack_control(struct l2cap_chan *chan,
1122 				  struct l2cap_ctrl *control,
1123 				  struct sk_buff *skb)
1124 {
1125 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1126 		put_unaligned_le32(__pack_extended_control(control),
1127 				   skb->data + L2CAP_HDR_SIZE);
1128 	} else {
1129 		put_unaligned_le16(__pack_enhanced_control(control),
1130 				   skb->data + L2CAP_HDR_SIZE);
1131 	}
1132 }
1133 
1134 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1135 {
1136 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1137 		return L2CAP_EXT_HDR_SIZE;
1138 	else
1139 		return L2CAP_ENH_HDR_SIZE;
1140 }
1141 
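/* Build an S-frame PDU: basic L2CAP header, a 16- or 32-bit control
 * field depending on FLAG_EXT_CTRL, and a trailing FCS when CRC16 is
 * in use.
 */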
1142 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1143 					       u32 control)
1144 {
1145 	struct sk_buff *skb;
1146 	struct l2cap_hdr *lh;
1147 	int hlen = __ertm_hdr_size(chan);
1148 
1149 	if (chan->fcs == L2CAP_FCS_CRC16)
1150 		hlen += L2CAP_FCS_SIZE;
1151 
1152 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1153 
1154 	if (!skb)
1155 		return ERR_PTR(-ENOMEM);
1156 
1157 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1158 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1159 	lh->cid = cpu_to_le16(chan->dcid);
1160 
1161 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1162 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1163 	else
1164 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1165 
1166 	if (chan->fcs == L2CAP_FCS_CRC16) {
1167 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1168 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1169 	}
1170 
1171 	skb->priority = HCI_PRIO_MAX;
1172 	return skb;
1173 }
1174 
1175 static void l2cap_send_sframe(struct l2cap_chan *chan,
1176 			      struct l2cap_ctrl *control)
1177 {
1178 	struct sk_buff *skb;
1179 	u32 control_field;
1180 
1181 	BT_DBG("chan %p, control %p", chan, control);
1182 
1183 	if (!control->sframe)
1184 		return;
1185 
1186 	if (__chan_is_moving(chan))
1187 		return;
1188 
1189 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1190 	    !control->poll)
1191 		control->final = 1;
1192 
1193 	if (control->super == L2CAP_SUPER_RR)
1194 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1195 	else if (control->super == L2CAP_SUPER_RNR)
1196 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1197 
1198 	if (control->super != L2CAP_SUPER_SREJ) {
1199 		chan->last_acked_seq = control->reqseq;
1200 		__clear_ack_timer(chan);
1201 	}
1202 
1203 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1204 	       control->final, control->poll, control->super);
1205 
1206 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1207 		control_field = __pack_extended_control(control);
1208 	else
1209 		control_field = __pack_enhanced_control(control);
1210 
1211 	skb = l2cap_create_sframe_pdu(chan, control_field);
1212 	if (!IS_ERR(skb))
1213 		l2cap_do_send(chan, skb);
1214 }
1215 
1216 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1217 {
1218 	struct l2cap_ctrl control;
1219 
1220 	BT_DBG("chan %p, poll %d", chan, poll);
1221 
1222 	memset(&control, 0, sizeof(control));
1223 	control.sframe = 1;
1224 	control.poll = poll;
1225 
1226 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1227 		control.super = L2CAP_SUPER_RNR;
1228 	else
1229 		control.super = L2CAP_SUPER_RR;
1230 
1231 	control.reqseq = chan->buffer_seq;
1232 	l2cap_send_sframe(chan, &control);
1233 }
1234 
1235 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1236 {
1237 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1238 		return true;
1239 
1240 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1241 }
1242 
1243 static bool __amp_capable(struct l2cap_chan *chan)
1244 {
1245 	struct l2cap_conn *conn = chan->conn;
1246 	struct hci_dev *hdev;
1247 	bool amp_available = false;
1248 
1249 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1250 		return false;
1251 
1252 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1253 		return false;
1254 
1255 	read_lock(&hci_dev_list_lock);
1256 	list_for_each_entry(hdev, &hci_dev_list, list) {
1257 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1258 		    test_bit(HCI_UP, &hdev->flags)) {
1259 			amp_available = true;
1260 			break;
1261 		}
1262 	}
1263 	read_unlock(&hci_dev_list_lock);
1264 
1265 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1266 		return amp_available;
1267 
1268 	return false;
1269 }
1270 
1271 static bool l2cap_check_efs(struct l2cap_chan *chan)
1272 {
1273 	/* Check EFS parameters */
1274 	return true;
1275 }
1276 
1277 void l2cap_send_conn_req(struct l2cap_chan *chan)
1278 {
1279 	struct l2cap_conn *conn = chan->conn;
1280 	struct l2cap_conn_req req;
1281 
1282 	req.scid = cpu_to_le16(chan->scid);
1283 	req.psm  = chan->psm;
1284 
1285 	chan->ident = l2cap_get_ident(conn);
1286 
1287 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1288 
1289 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1290 }
1291 
1292 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1293 {
1294 	struct l2cap_create_chan_req req;
1295 	req.scid = cpu_to_le16(chan->scid);
1296 	req.psm  = chan->psm;
1297 	req.amp_id = amp_id;
1298 
1299 	chan->ident = l2cap_get_ident(chan->conn);
1300 
1301 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1302 		       sizeof(req), &req);
1303 }
1304 
1305 static void l2cap_move_setup(struct l2cap_chan *chan)
1306 {
1307 	struct sk_buff *skb;
1308 
1309 	BT_DBG("chan %p", chan);
1310 
1311 	if (chan->mode != L2CAP_MODE_ERTM)
1312 		return;
1313 
1314 	__clear_retrans_timer(chan);
1315 	__clear_monitor_timer(chan);
1316 	__clear_ack_timer(chan);
1317 
1318 	chan->retry_count = 0;
1319 	skb_queue_walk(&chan->tx_q, skb) {
1320 		if (bt_cb(skb)->l2cap.retries)
1321 			bt_cb(skb)->l2cap.retries = 1;
1322 		else
1323 			break;
1324 	}
1325 
1326 	chan->expected_tx_seq = chan->buffer_seq;
1327 
1328 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1329 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1330 	l2cap_seq_list_clear(&chan->retrans_list);
1331 	l2cap_seq_list_clear(&chan->srej_list);
1332 	skb_queue_purge(&chan->srej_q);
1333 
1334 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1335 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1336 
1337 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1338 }
1339 
1340 static void l2cap_move_done(struct l2cap_chan *chan)
1341 {
1342 	u8 move_role = chan->move_role;
1343 	BT_DBG("chan %p", chan);
1344 
1345 	chan->move_state = L2CAP_MOVE_STABLE;
1346 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1347 
1348 	if (chan->mode != L2CAP_MODE_ERTM)
1349 		return;
1350 
1351 	switch (move_role) {
1352 	case L2CAP_MOVE_ROLE_INITIATOR:
1353 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1354 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1355 		break;
1356 	case L2CAP_MOVE_ROLE_RESPONDER:
1357 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1358 		break;
1359 	}
1360 }
1361 
1362 static void l2cap_chan_ready(struct l2cap_chan *chan)
1363 {
1364 	/* The channel may have already been flagged as connected in
1365 	 * case of receiving data before the L2CAP info req/rsp
1366 	 * procedure is complete.
1367 	 */
1368 	if (chan->state == BT_CONNECTED)
1369 		return;
1370 
1371 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1372 	chan->conf_state = 0;
1373 	__clear_chan_timer(chan);
1374 
1375 	switch (chan->mode) {
1376 	case L2CAP_MODE_LE_FLOWCTL:
1377 	case L2CAP_MODE_EXT_FLOWCTL:
1378 		if (!chan->tx_credits)
1379 			chan->ops->suspend(chan);
1380 		break;
1381 	}
1382 
1383 	chan->state = BT_CONNECTED;
1384 
1385 	chan->ops->ready(chan);
1386 }
1387 
1388 static void l2cap_le_connect(struct l2cap_chan *chan)
1389 {
1390 	struct l2cap_conn *conn = chan->conn;
1391 	struct l2cap_le_conn_req req;
1392 
1393 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1394 		return;
1395 
1396 	if (!chan->imtu)
1397 		chan->imtu = chan->conn->mtu;
1398 
1399 	l2cap_le_flowctl_init(chan, 0);
1400 
1401 	memset(&req, 0, sizeof(req));
1402 	req.psm     = chan->psm;
1403 	req.scid    = cpu_to_le16(chan->scid);
1404 	req.mtu     = cpu_to_le16(chan->imtu);
1405 	req.mps     = cpu_to_le16(chan->mps);
1406 	req.credits = cpu_to_le16(chan->rx_credits);
1407 
1408 	chan->ident = l2cap_get_ident(conn);
1409 
1410 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1411 		       sizeof(req), &req);
1412 }
1413 
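/* Scratch data used to build a single enhanced-credit connect request
 * carrying up to five SCIDs: the initiating channel plus any deferred
 * channels that share its PSM and peer PID.
 */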
1414 struct l2cap_ecred_conn_data {
1415 	struct {
1416 		struct l2cap_ecred_conn_req req;
1417 		__le16 scid[5];
1418 	} __packed pdu;
1419 	struct l2cap_chan *chan;
1420 	struct pid *pid;
1421 	int count;
1422 };
1423 
1424 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1425 {
1426 	struct l2cap_ecred_conn_data *conn = data;
1427 	struct pid *pid;
1428 
1429 	if (chan == conn->chan)
1430 		return;
1431 
1432 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1433 		return;
1434 
1435 	pid = chan->ops->get_peer_pid(chan);
1436 
1437 	/* Only add deferred channels with the same PID/PSM */
1438 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1439 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1440 		return;
1441 
1442 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1443 		return;
1444 
1445 	l2cap_ecred_init(chan, 0);
1446 
1447 	/* Set the same ident so we can match on the rsp */
1448 	chan->ident = conn->chan->ident;
1449 
1450 	/* Include all channels deferred */
1451 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1452 
1453 	conn->count++;
1454 }
1455 
1456 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1457 {
1458 	struct l2cap_conn *conn = chan->conn;
1459 	struct l2cap_ecred_conn_data data;
1460 
1461 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1462 		return;
1463 
1464 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1465 		return;
1466 
1467 	l2cap_ecred_init(chan, 0);
1468 
1469 	memset(&data, 0, sizeof(data));
1470 	data.pdu.req.psm     = chan->psm;
1471 	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
1472 	data.pdu.req.mps     = cpu_to_le16(chan->mps);
1473 	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1474 	data.pdu.scid[0]     = cpu_to_le16(chan->scid);
1475 
1476 	chan->ident = l2cap_get_ident(conn);
1477 
1478 	data.count = 1;
1479 	data.chan = chan;
1480 	data.pid = chan->ops->get_peer_pid(chan);
1481 
1482 	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1483 
1484 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1485 		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
1486 		       &data.pdu);
1487 }
1488 
1489 static void l2cap_le_start(struct l2cap_chan *chan)
1490 {
1491 	struct l2cap_conn *conn = chan->conn;
1492 
1493 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1494 		return;
1495 
1496 	if (!chan->psm) {
1497 		l2cap_chan_ready(chan);
1498 		return;
1499 	}
1500 
1501 	if (chan->state == BT_CONNECT) {
1502 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1503 			l2cap_ecred_connect(chan);
1504 		else
1505 			l2cap_le_connect(chan);
1506 	}
1507 }
1508 
1509 static void l2cap_start_connection(struct l2cap_chan *chan)
1510 {
1511 	if (__amp_capable(chan)) {
1512 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1513 		a2mp_discover_amp(chan);
1514 	} else if (chan->conn->hcon->type == LE_LINK) {
1515 		l2cap_le_start(chan);
1516 	} else {
1517 		l2cap_send_conn_req(chan);
1518 	}
1519 }
1520 
1521 static void l2cap_request_info(struct l2cap_conn *conn)
1522 {
1523 	struct l2cap_info_req req;
1524 
1525 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1526 		return;
1527 
1528 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1529 
1530 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1531 	conn->info_ident = l2cap_get_ident(conn);
1532 
1533 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1534 
1535 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1536 		       sizeof(req), &req);
1537 }
1538 
1539 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1540 {
1541 	/* The minimum encryption key size needs to be enforced by the
1542 	 * host stack before establishing any L2CAP connections. The
1543 	 * specification in theory allows a minimum of 1, but to align
1544 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1545 	 *
1546 	 * This check might also be called for unencrypted connections
1547 	 * that have no key size requirements. Ensure that the link is
1548 	 * actually encrypted before enforcing a key size.
1549 	 */
1550 	int min_key_size = hcon->hdev->min_enc_key_size;
1551 
1552 	/* On FIPS security level, key size must be 16 bytes */
1553 	if (hcon->sec_level == BT_SECURITY_FIPS)
1554 		min_key_size = 16;
1555 
1556 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1557 		hcon->enc_key_size >= min_key_size);
1558 }
1559 
1560 static void l2cap_do_start(struct l2cap_chan *chan)
1561 {
1562 	struct l2cap_conn *conn = chan->conn;
1563 
1564 	if (conn->hcon->type == LE_LINK) {
1565 		l2cap_le_start(chan);
1566 		return;
1567 	}
1568 
1569 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1570 		l2cap_request_info(conn);
1571 		return;
1572 	}
1573 
1574 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1575 		return;
1576 
1577 	if (!l2cap_chan_check_security(chan, true) ||
1578 	    !__l2cap_no_conn_pending(chan))
1579 		return;
1580 
1581 	if (l2cap_check_enc_key_size(conn->hcon))
1582 		l2cap_start_connection(chan);
1583 	else
1584 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1585 }
1586 
1587 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1588 {
1589 	u32 local_feat_mask = l2cap_feat_mask;
1590 	if (!disable_ertm)
1591 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1592 
1593 	switch (mode) {
1594 	case L2CAP_MODE_ERTM:
1595 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1596 	case L2CAP_MODE_STREAMING:
1597 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1598 	default:
1599 		return 0x00;
1600 	}
1601 }
1602 
1603 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1604 {
1605 	struct l2cap_conn *conn = chan->conn;
1606 	struct l2cap_disconn_req req;
1607 
1608 	if (!conn)
1609 		return;
1610 
1611 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1612 		__clear_retrans_timer(chan);
1613 		__clear_monitor_timer(chan);
1614 		__clear_ack_timer(chan);
1615 	}
1616 
1617 	if (chan->scid == L2CAP_CID_A2MP) {
1618 		l2cap_state_change(chan, BT_DISCONN);
1619 		return;
1620 	}
1621 
1622 	req.dcid = cpu_to_le16(chan->dcid);
1623 	req.scid = cpu_to_le16(chan->scid);
1624 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1625 		       sizeof(req), &req);
1626 
1627 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1628 }
1629 
1630 /* ---- L2CAP connections ---- */
1631 static void l2cap_conn_start(struct l2cap_conn *conn)
1632 {
1633 	struct l2cap_chan *chan, *tmp;
1634 
1635 	BT_DBG("conn %p", conn);
1636 
1637 	mutex_lock(&conn->chan_lock);
1638 
1639 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1640 		l2cap_chan_lock(chan);
1641 
1642 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1643 			l2cap_chan_ready(chan);
1644 			l2cap_chan_unlock(chan);
1645 			continue;
1646 		}
1647 
1648 		if (chan->state == BT_CONNECT) {
1649 			if (!l2cap_chan_check_security(chan, true) ||
1650 			    !__l2cap_no_conn_pending(chan)) {
1651 				l2cap_chan_unlock(chan);
1652 				continue;
1653 			}
1654 
1655 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1656 			    && test_bit(CONF_STATE2_DEVICE,
1657 					&chan->conf_state)) {
1658 				l2cap_chan_close(chan, ECONNRESET);
1659 				l2cap_chan_unlock(chan);
1660 				continue;
1661 			}
1662 
1663 			if (l2cap_check_enc_key_size(conn->hcon))
1664 				l2cap_start_connection(chan);
1665 			else
1666 				l2cap_chan_close(chan, ECONNREFUSED);
1667 
1668 		} else if (chan->state == BT_CONNECT2) {
1669 			struct l2cap_conn_rsp rsp;
1670 			char buf[128];
1671 			rsp.scid = cpu_to_le16(chan->dcid);
1672 			rsp.dcid = cpu_to_le16(chan->scid);
1673 
1674 			if (l2cap_chan_check_security(chan, false)) {
1675 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1676 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1677 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1678 					chan->ops->defer(chan);
1679 
1680 				} else {
1681 					l2cap_state_change(chan, BT_CONFIG);
1682 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1683 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1684 				}
1685 			} else {
1686 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1687 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1688 			}
1689 
1690 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1691 				       sizeof(rsp), &rsp);
1692 
1693 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1694 			    rsp.result != L2CAP_CR_SUCCESS) {
1695 				l2cap_chan_unlock(chan);
1696 				continue;
1697 			}
1698 
1699 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1700 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1701 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1702 			chan->num_conf_req++;
1703 		}
1704 
1705 		l2cap_chan_unlock(chan);
1706 	}
1707 
1708 	mutex_unlock(&conn->chan_lock);
1709 }
1710 
1711 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1712 {
1713 	struct hci_conn *hcon = conn->hcon;
1714 	struct hci_dev *hdev = hcon->hdev;
1715 
1716 	BT_DBG("%s conn %p", hdev->name, conn);
1717 
1718 	/* For outgoing pairing which doesn't necessarily have an
1719 	 * associated socket (e.g. mgmt_pair_device).
1720 	 */
1721 	if (hcon->out)
1722 		smp_conn_security(hcon, hcon->pending_sec_level);
1723 
1724 	/* For LE peripheral connections, make sure the connection interval
1725 	 * is in the range of the minimum and maximum interval that has
1726 	 * been configured for this connection. If not, then trigger
1727 	 * the connection update procedure.
1728 	 */
1729 	if (hcon->role == HCI_ROLE_SLAVE &&
1730 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1731 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1732 		struct l2cap_conn_param_update_req req;
1733 
1734 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1735 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1736 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1737 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1738 
1739 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1740 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1741 	}
1742 }
1743 
1744 static void l2cap_conn_ready(struct l2cap_conn *conn)
1745 {
1746 	struct l2cap_chan *chan;
1747 	struct hci_conn *hcon = conn->hcon;
1748 
1749 	BT_DBG("conn %p", conn);
1750 
1751 	if (hcon->type == ACL_LINK)
1752 		l2cap_request_info(conn);
1753 
1754 	mutex_lock(&conn->chan_lock);
1755 
1756 	list_for_each_entry(chan, &conn->chan_l, list) {
1757 
1758 		l2cap_chan_lock(chan);
1759 
1760 		if (chan->scid == L2CAP_CID_A2MP) {
1761 			l2cap_chan_unlock(chan);
1762 			continue;
1763 		}
1764 
1765 		if (hcon->type == LE_LINK) {
1766 			l2cap_le_start(chan);
1767 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1768 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1769 				l2cap_chan_ready(chan);
1770 		} else if (chan->state == BT_CONNECT) {
1771 			l2cap_do_start(chan);
1772 		}
1773 
1774 		l2cap_chan_unlock(chan);
1775 	}
1776 
1777 	mutex_unlock(&conn->chan_lock);
1778 
1779 	if (hcon->type == LE_LINK)
1780 		l2cap_le_conn_ready(conn);
1781 
1782 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1783 }
1784 
1785 /* Notify sockets that we cannot guarantee reliability anymore */
1786 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1787 {
1788 	struct l2cap_chan *chan;
1789 
1790 	BT_DBG("conn %p", conn);
1791 
1792 	mutex_lock(&conn->chan_lock);
1793 
1794 	list_for_each_entry(chan, &conn->chan_l, list) {
1795 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1796 			l2cap_chan_set_err(chan, err);
1797 	}
1798 
1799 	mutex_unlock(&conn->chan_lock);
1800 }
1801 
1802 static void l2cap_info_timeout(struct work_struct *work)
1803 {
1804 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1805 					       info_timer.work);
1806 
1807 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1808 	conn->info_ident = 0;
1809 
1810 	l2cap_conn_start(conn);
1811 }
1812 
1813 /*
1814  * l2cap_user
1815  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1816  * callback is called during registration. The ->remove callback is called
1817  * during unregistration.
1818  * An l2cap_user object is unregistered either explicitly or when the
1819  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1820  * l2cap->hchan, etc. are valid as long as the remove callback hasn't been called.
1821  * External modules must own a reference to the l2cap_conn object if they intend
1822  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1823  * any time if they don't.
1824  */
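
/*
 * A minimal usage sketch (illustrative only, not part of this file): an
 * external module initialises a struct l2cap_user, registers it while
 * holding a reference to the connection, and unregisters it before dropping
 * that reference.  The names my_probe, my_remove and my_user are hypothetical.
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// conn->hcon and conn->hchan are valid from here on
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// last point at which conn->hcon and conn->hchan may be used
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *		.list   = LIST_HEAD_INIT(my_user.list),
 *	};
 *
 *	l2cap_conn_get(conn);
 *	if (l2cap_register_user(conn, &my_user)) {
 *		l2cap_conn_put(conn);
 *		return;
 *	}
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 *	l2cap_conn_put(conn);
 */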
1825 
1826 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1827 {
1828 	struct hci_dev *hdev = conn->hcon->hdev;
1829 	int ret;
1830 
1831 	/* We need to check whether l2cap_conn is registered. If it is not, we
1832 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1833 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1834 	 * relies on the parent hci_conn object to be locked. This itself relies
1835 	 * on the hci_dev object to be locked. So we must lock the hci device
1836 	 * here, too. */
1837 
1838 	hci_dev_lock(hdev);
1839 
1840 	if (!list_empty(&user->list)) {
1841 		ret = -EINVAL;
1842 		goto out_unlock;
1843 	}
1844 
1845 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1846 	if (!conn->hchan) {
1847 		ret = -ENODEV;
1848 		goto out_unlock;
1849 	}
1850 
1851 	ret = user->probe(conn, user);
1852 	if (ret)
1853 		goto out_unlock;
1854 
1855 	list_add(&user->list, &conn->users);
1856 	ret = 0;
1857 
1858 out_unlock:
1859 	hci_dev_unlock(hdev);
1860 	return ret;
1861 }
1862 EXPORT_SYMBOL(l2cap_register_user);
1863 
1864 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1865 {
1866 	struct hci_dev *hdev = conn->hcon->hdev;
1867 
1868 	hci_dev_lock(hdev);
1869 
1870 	if (list_empty(&user->list))
1871 		goto out_unlock;
1872 
1873 	list_del_init(&user->list);
1874 	user->remove(conn, user);
1875 
1876 out_unlock:
1877 	hci_dev_unlock(hdev);
1878 }
1879 EXPORT_SYMBOL(l2cap_unregister_user);
1880 
1881 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1882 {
1883 	struct l2cap_user *user;
1884 
1885 	while (!list_empty(&conn->users)) {
1886 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1887 		list_del_init(&user->list);
1888 		user->remove(conn, user);
1889 	}
1890 }
1891 
1892 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1893 {
1894 	struct l2cap_conn *conn = hcon->l2cap_data;
1895 	struct l2cap_chan *chan, *l;
1896 
1897 	if (!conn)
1898 		return;
1899 
1900 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1901 
1902 	kfree_skb(conn->rx_skb);
1903 
1904 	skb_queue_purge(&conn->pending_rx);
1905 
1906 	/* We cannot call flush_work(&conn->pending_rx_work) here since we
1907 	 * might block if we are running on a worker from the same workqueue
1908 	 * pending_rx_work is waiting on.
1909 	 */
1910 	if (work_pending(&conn->pending_rx_work))
1911 		cancel_work_sync(&conn->pending_rx_work);
1912 
1913 	if (work_pending(&conn->id_addr_update_work))
1914 		cancel_work_sync(&conn->id_addr_update_work);
1915 
1916 	l2cap_unregister_all_users(conn);
1917 
1918 	/* Force the connection to be immediately dropped */
1919 	hcon->disc_timeout = 0;
1920 
1921 	mutex_lock(&conn->chan_lock);
1922 
1923 	/* Kill channels */
1924 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1925 		l2cap_chan_hold(chan);
1926 		l2cap_chan_lock(chan);
1927 
1928 		l2cap_chan_del(chan, err);
1929 
1930 		chan->ops->close(chan);
1931 
1932 		l2cap_chan_unlock(chan);
1933 		l2cap_chan_put(chan);
1934 	}
1935 
1936 	mutex_unlock(&conn->chan_lock);
1937 
1938 	hci_chan_del(conn->hchan);
1939 
1940 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1941 		cancel_delayed_work_sync(&conn->info_timer);
1942 
1943 	hcon->l2cap_data = NULL;
1944 	conn->hchan = NULL;
1945 	l2cap_conn_put(conn);
1946 }
1947 
1948 static void l2cap_conn_free(struct kref *ref)
1949 {
1950 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1951 
1952 	hci_conn_put(conn->hcon);
1953 	kfree(conn);
1954 }
1955 
1956 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1957 {
1958 	kref_get(&conn->ref);
1959 	return conn;
1960 }
1961 EXPORT_SYMBOL(l2cap_conn_get);
1962 
1963 void l2cap_conn_put(struct l2cap_conn *conn)
1964 {
1965 	kref_put(&conn->ref, l2cap_conn_free);
1966 }
1967 EXPORT_SYMBOL(l2cap_conn_put);
1968 
1969 /* ---- Socket interface ---- */
1970 
1971 /* Find socket with psm and source / destination bdaddr.
1972  * Returns closest match.
1973  */
1974 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1975 						   bdaddr_t *src,
1976 						   bdaddr_t *dst,
1977 						   u8 link_type)
1978 {
1979 	struct l2cap_chan *c, *tmp, *c1 = NULL;
1980 
1981 	read_lock(&chan_list_lock);
1982 
1983 	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
1984 		if (state && c->state != state)
1985 			continue;
1986 
1987 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1988 			continue;
1989 
1990 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1991 			continue;
1992 
1993 		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
1994 			int src_match, dst_match;
1995 			int src_any, dst_any;
1996 
1997 			/* Exact match. */
1998 			src_match = !bacmp(&c->src, src);
1999 			dst_match = !bacmp(&c->dst, dst);
2000 			if (src_match && dst_match) {
2001 				if (!l2cap_chan_hold_unless_zero(c))
2002 					continue;
2003 
2004 				read_unlock(&chan_list_lock);
2005 				return c;
2006 			}
2007 
2008 			/* Closest match */
2009 			src_any = !bacmp(&c->src, BDADDR_ANY);
2010 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
2011 			if ((src_match && dst_any) || (src_any && dst_match) ||
2012 			    (src_any && dst_any))
2013 				c1 = c;
2014 		}
2015 	}
2016 
2017 	if (c1)
2018 		c1 = l2cap_chan_hold_unless_zero(c1);
2019 
2020 	read_unlock(&chan_list_lock);
2021 
2022 	return c1;
2023 }
2024 
2025 static void l2cap_monitor_timeout(struct work_struct *work)
2026 {
2027 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2028 					       monitor_timer.work);
2029 
2030 	BT_DBG("chan %p", chan);
2031 
2032 	l2cap_chan_lock(chan);
2033 
2034 	if (!chan->conn) {
2035 		l2cap_chan_unlock(chan);
2036 		l2cap_chan_put(chan);
2037 		return;
2038 	}
2039 
2040 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2041 
2042 	l2cap_chan_unlock(chan);
2043 	l2cap_chan_put(chan);
2044 }
2045 
2046 static void l2cap_retrans_timeout(struct work_struct *work)
2047 {
2048 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2049 					       retrans_timer.work);
2050 
2051 	BT_DBG("chan %p", chan);
2052 
2053 	l2cap_chan_lock(chan);
2054 
2055 	if (!chan->conn) {
2056 		l2cap_chan_unlock(chan);
2057 		l2cap_chan_put(chan);
2058 		return;
2059 	}
2060 
2061 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2062 	l2cap_chan_unlock(chan);
2063 	l2cap_chan_put(chan);
2064 }
2065 
2066 static void l2cap_streaming_send(struct l2cap_chan *chan,
2067 				 struct sk_buff_head *skbs)
2068 {
2069 	struct sk_buff *skb;
2070 	struct l2cap_ctrl *control;
2071 
2072 	BT_DBG("chan %p, skbs %p", chan, skbs);
2073 
2074 	if (__chan_is_moving(chan))
2075 		return;
2076 
2077 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
2078 
2079 	while (!skb_queue_empty(&chan->tx_q)) {
2080 
2081 		skb = skb_dequeue(&chan->tx_q);
2082 
2083 		bt_cb(skb)->l2cap.retries = 1;
2084 		control = &bt_cb(skb)->l2cap;
2085 
2086 		control->reqseq = 0;
2087 		control->txseq = chan->next_tx_seq;
2088 
2089 		__pack_control(chan, control, skb);
2090 
2091 		if (chan->fcs == L2CAP_FCS_CRC16) {
2092 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2093 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2094 		}
2095 
2096 		l2cap_do_send(chan, skb);
2097 
2098 		BT_DBG("Sent txseq %u", control->txseq);
2099 
2100 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2101 		chan->frames_sent++;
2102 	}
2103 }
2104 
2105 static int l2cap_ertm_send(struct l2cap_chan *chan)
2106 {
2107 	struct sk_buff *skb, *tx_skb;
2108 	struct l2cap_ctrl *control;
2109 	int sent = 0;
2110 
2111 	BT_DBG("chan %p", chan);
2112 
2113 	if (chan->state != BT_CONNECTED)
2114 		return -ENOTCONN;
2115 
2116 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2117 		return 0;
2118 
2119 	if (__chan_is_moving(chan))
2120 		return 0;
2121 
2122 	while (chan->tx_send_head &&
2123 	       chan->unacked_frames < chan->remote_tx_win &&
2124 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
2125 
2126 		skb = chan->tx_send_head;
2127 
2128 		bt_cb(skb)->l2cap.retries = 1;
2129 		control = &bt_cb(skb)->l2cap;
2130 
2131 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2132 			control->final = 1;
2133 
2134 		control->reqseq = chan->buffer_seq;
2135 		chan->last_acked_seq = chan->buffer_seq;
2136 		control->txseq = chan->next_tx_seq;
2137 
2138 		__pack_control(chan, control, skb);
2139 
2140 		if (chan->fcs == L2CAP_FCS_CRC16) {
2141 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2142 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2143 		}
2144 
2145 		/* Clone after data has been modified. Data is assumed to be
2146 		   read-only (for locking purposes) on cloned sk_buffs.
2147 		 */
2148 		tx_skb = skb_clone(skb, GFP_KERNEL);
2149 
2150 		if (!tx_skb)
2151 			break;
2152 
2153 		__set_retrans_timer(chan);
2154 
2155 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2156 		chan->unacked_frames++;
2157 		chan->frames_sent++;
2158 		sent++;
2159 
2160 		if (skb_queue_is_last(&chan->tx_q, skb))
2161 			chan->tx_send_head = NULL;
2162 		else
2163 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2164 
2165 		l2cap_do_send(chan, tx_skb);
2166 		BT_DBG("Sent txseq %u", control->txseq);
2167 	}
2168 
2169 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2170 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
2171 
2172 	return sent;
2173 }
2174 
2175 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2176 {
2177 	struct l2cap_ctrl control;
2178 	struct sk_buff *skb;
2179 	struct sk_buff *tx_skb;
2180 	u16 seq;
2181 
2182 	BT_DBG("chan %p", chan);
2183 
2184 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2185 		return;
2186 
2187 	if (__chan_is_moving(chan))
2188 		return;
2189 
2190 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2191 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2192 
2193 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2194 		if (!skb) {
2195 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2196 			       seq);
2197 			continue;
2198 		}
2199 
2200 		bt_cb(skb)->l2cap.retries++;
2201 		control = bt_cb(skb)->l2cap;
2202 
2203 		if (chan->max_tx != 0 &&
2204 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
2205 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2206 			l2cap_send_disconn_req(chan, ECONNRESET);
2207 			l2cap_seq_list_clear(&chan->retrans_list);
2208 			break;
2209 		}
2210 
2211 		control.reqseq = chan->buffer_seq;
2212 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2213 			control.final = 1;
2214 		else
2215 			control.final = 0;
2216 
2217 		if (skb_cloned(skb)) {
2218 			/* Cloned sk_buffs are read-only, so we need a
2219 			 * writeable copy
2220 			 */
2221 			tx_skb = skb_copy(skb, GFP_KERNEL);
2222 		} else {
2223 			tx_skb = skb_clone(skb, GFP_KERNEL);
2224 		}
2225 
2226 		if (!tx_skb) {
2227 			l2cap_seq_list_clear(&chan->retrans_list);
2228 			break;
2229 		}
2230 
2231 		/* Update skb contents */
2232 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2233 			put_unaligned_le32(__pack_extended_control(&control),
2234 					   tx_skb->data + L2CAP_HDR_SIZE);
2235 		} else {
2236 			put_unaligned_le16(__pack_enhanced_control(&control),
2237 					   tx_skb->data + L2CAP_HDR_SIZE);
2238 		}
2239 
2240 		/* Update FCS */
2241 		if (chan->fcs == L2CAP_FCS_CRC16) {
2242 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2243 					tx_skb->len - L2CAP_FCS_SIZE);
2244 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2245 						L2CAP_FCS_SIZE);
2246 		}
2247 
2248 		l2cap_do_send(chan, tx_skb);
2249 
2250 		BT_DBG("Resent txseq %d", control.txseq);
2251 
2252 		chan->last_acked_seq = chan->buffer_seq;
2253 	}
2254 }
2255 
2256 static void l2cap_retransmit(struct l2cap_chan *chan,
2257 			     struct l2cap_ctrl *control)
2258 {
2259 	BT_DBG("chan %p, control %p", chan, control);
2260 
2261 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2262 	l2cap_ertm_resend(chan);
2263 }
2264 
2265 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2266 				 struct l2cap_ctrl *control)
2267 {
2268 	struct sk_buff *skb;
2269 
2270 	BT_DBG("chan %p, control %p", chan, control);
2271 
2272 	if (control->poll)
2273 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2274 
2275 	l2cap_seq_list_clear(&chan->retrans_list);
2276 
2277 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2278 		return;
2279 
2280 	if (chan->unacked_frames) {
2281 		skb_queue_walk(&chan->tx_q, skb) {
2282 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2283 			    skb == chan->tx_send_head)
2284 				break;
2285 		}
2286 
2287 		skb_queue_walk_from(&chan->tx_q, skb) {
2288 			if (skb == chan->tx_send_head)
2289 				break;
2290 
2291 			l2cap_seq_list_append(&chan->retrans_list,
2292 					      bt_cb(skb)->l2cap.txseq);
2293 		}
2294 
2295 		l2cap_ertm_resend(chan);
2296 	}
2297 }
2298 
2299 static void l2cap_send_ack(struct l2cap_chan *chan)
2300 {
2301 	struct l2cap_ctrl control;
2302 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2303 					 chan->last_acked_seq);
2304 	int threshold;
2305 
2306 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2307 	       chan, chan->last_acked_seq, chan->buffer_seq);
2308 
2309 	memset(&control, 0, sizeof(control));
2310 	control.sframe = 1;
2311 
2312 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2313 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2314 		__clear_ack_timer(chan);
2315 		control.super = L2CAP_SUPER_RNR;
2316 		control.reqseq = chan->buffer_seq;
2317 		l2cap_send_sframe(chan, &control);
2318 	} else {
2319 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2320 			l2cap_ertm_send(chan);
2321 			/* If any i-frames were sent, they included an ack */
2322 			if (chan->buffer_seq == chan->last_acked_seq)
2323 				frames_to_ack = 0;
2324 		}
2325 
2326 		/* Ack now if the window is 3/4ths full.
2327 		 * Calculate without mul or div
2328 		 */
2329 		threshold = chan->ack_win;
2330 		threshold += threshold << 1;
2331 		threshold >>= 2;
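		/* Worked example of the shift-based 3/4 computation above:
		 * with ack_win = 8, threshold = (8 + 16) >> 2 = 6, so an ack
		 * is forced once six frames are pending acknowledgement.
		 */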
2332 
2333 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2334 		       threshold);
2335 
2336 		if (frames_to_ack >= threshold) {
2337 			__clear_ack_timer(chan);
2338 			control.super = L2CAP_SUPER_RR;
2339 			control.reqseq = chan->buffer_seq;
2340 			l2cap_send_sframe(chan, &control);
2341 			frames_to_ack = 0;
2342 		}
2343 
2344 		if (frames_to_ack)
2345 			__set_ack_timer(chan);
2346 	}
2347 }
2348 
2349 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2350 					 struct msghdr *msg, int len,
2351 					 int count, struct sk_buff *skb)
2352 {
2353 	struct l2cap_conn *conn = chan->conn;
2354 	struct sk_buff **frag;
2355 	int sent = 0;
2356 
2357 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2358 		return -EFAULT;
2359 
2360 	sent += count;
2361 	len  -= count;
2362 
2363 	/* Continuation fragments (no L2CAP header) */
2364 	frag = &skb_shinfo(skb)->frag_list;
2365 	while (len) {
2366 		struct sk_buff *tmp;
2367 
2368 		count = min_t(unsigned int, conn->mtu, len);
2369 
2370 		tmp = chan->ops->alloc_skb(chan, 0, count,
2371 					   msg->msg_flags & MSG_DONTWAIT);
2372 		if (IS_ERR(tmp))
2373 			return PTR_ERR(tmp);
2374 
2375 		*frag = tmp;
2376 
2377 		if (!copy_from_iter_full(skb_put(*frag, count), count,
2378 				   &msg->msg_iter))
2379 			return -EFAULT;
2380 
2381 		sent += count;
2382 		len  -= count;
2383 
2384 		skb->len += (*frag)->len;
2385 		skb->data_len += (*frag)->len;
2386 
2387 		frag = &(*frag)->next;
2388 	}
2389 
2390 	return sent;
2391 }
2392 
2393 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2394 						 struct msghdr *msg, size_t len)
2395 {
2396 	struct l2cap_conn *conn = chan->conn;
2397 	struct sk_buff *skb;
2398 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2399 	struct l2cap_hdr *lh;
2400 
2401 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2402 	       __le16_to_cpu(chan->psm), len);
2403 
2404 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2405 
2406 	skb = chan->ops->alloc_skb(chan, hlen, count,
2407 				   msg->msg_flags & MSG_DONTWAIT);
2408 	if (IS_ERR(skb))
2409 		return skb;
2410 
2411 	/* Create L2CAP header */
2412 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2413 	lh->cid = cpu_to_le16(chan->dcid);
2414 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2415 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2416 
2417 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2418 	if (unlikely(err < 0)) {
2419 		kfree_skb(skb);
2420 		return ERR_PTR(err);
2421 	}
2422 	return skb;
2423 }
2424 
2425 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2426 					      struct msghdr *msg, size_t len)
2427 {
2428 	struct l2cap_conn *conn = chan->conn;
2429 	struct sk_buff *skb;
2430 	int err, count;
2431 	struct l2cap_hdr *lh;
2432 
2433 	BT_DBG("chan %p len %zu", chan, len);
2434 
2435 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2436 
2437 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2438 				   msg->msg_flags & MSG_DONTWAIT);
2439 	if (IS_ERR(skb))
2440 		return skb;
2441 
2442 	/* Create L2CAP header */
2443 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2444 	lh->cid = cpu_to_le16(chan->dcid);
2445 	lh->len = cpu_to_le16(len);
2446 
2447 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2448 	if (unlikely(err < 0)) {
2449 		kfree_skb(skb);
2450 		return ERR_PTR(err);
2451 	}
2452 	return skb;
2453 }
2454 
2455 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2456 					       struct msghdr *msg, size_t len,
2457 					       u16 sdulen)
2458 {
2459 	struct l2cap_conn *conn = chan->conn;
2460 	struct sk_buff *skb;
2461 	int err, count, hlen;
2462 	struct l2cap_hdr *lh;
2463 
2464 	BT_DBG("chan %p len %zu", chan, len);
2465 
2466 	if (!conn)
2467 		return ERR_PTR(-ENOTCONN);
2468 
2469 	hlen = __ertm_hdr_size(chan);
2470 
2471 	if (sdulen)
2472 		hlen += L2CAP_SDULEN_SIZE;
2473 
2474 	if (chan->fcs == L2CAP_FCS_CRC16)
2475 		hlen += L2CAP_FCS_SIZE;
2476 
2477 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2478 
2479 	skb = chan->ops->alloc_skb(chan, hlen, count,
2480 				   msg->msg_flags & MSG_DONTWAIT);
2481 	if (IS_ERR(skb))
2482 		return skb;
2483 
2484 	/* Create L2CAP header */
2485 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2486 	lh->cid = cpu_to_le16(chan->dcid);
2487 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2488 
2489 	/* Control header is populated later */
2490 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2491 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2492 	else
2493 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2494 
2495 	if (sdulen)
2496 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2497 
2498 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2499 	if (unlikely(err < 0)) {
2500 		kfree_skb(skb);
2501 		return ERR_PTR(err);
2502 	}
2503 
2504 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2505 	bt_cb(skb)->l2cap.retries = 0;
2506 	return skb;
2507 }
2508 
2509 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2510 			     struct sk_buff_head *seg_queue,
2511 			     struct msghdr *msg, size_t len)
2512 {
2513 	struct sk_buff *skb;
2514 	u16 sdu_len;
2515 	size_t pdu_len;
2516 	u8 sar;
2517 
2518 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2519 
2520 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2521 	 * so fragmented skbs are not used.  The HCI layer's handling
2522 	 * of fragmented skbs is not compatible with ERTM's queueing.
2523 	 */
2524 
2525 	/* PDU size is derived from the HCI MTU */
2526 	pdu_len = chan->conn->mtu;
2527 
2528 	/* Constrain PDU size for BR/EDR connections */
2529 	if (!chan->hs_hcon)
2530 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2531 
2532 	/* Adjust for largest possible L2CAP overhead. */
2533 	if (chan->fcs)
2534 		pdu_len -= L2CAP_FCS_SIZE;
2535 
2536 	pdu_len -= __ertm_hdr_size(chan);
2537 
2538 	/* Remote device may have requested smaller PDUs */
2539 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
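	/* Illustrative sizing, assuming enhanced control fields, FCS in use
	 * and no AMP link: an ACL MTU of 1021 is clamped to 1019 for BR/EDR,
	 * reduced by the 2-byte FCS and the 6-byte enhanced ERTM header to
	 * 1011, and finally limited by the MPS requested by the remote.
	 */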
2540 
2541 	if (len <= pdu_len) {
2542 		sar = L2CAP_SAR_UNSEGMENTED;
2543 		sdu_len = 0;
2544 		pdu_len = len;
2545 	} else {
2546 		sar = L2CAP_SAR_START;
2547 		sdu_len = len;
2548 	}
2549 
2550 	while (len > 0) {
2551 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2552 
2553 		if (IS_ERR(skb)) {
2554 			__skb_queue_purge(seg_queue);
2555 			return PTR_ERR(skb);
2556 		}
2557 
2558 		bt_cb(skb)->l2cap.sar = sar;
2559 		__skb_queue_tail(seg_queue, skb);
2560 
2561 		len -= pdu_len;
2562 		if (sdu_len)
2563 			sdu_len = 0;
2564 
2565 		if (len <= pdu_len) {
2566 			sar = L2CAP_SAR_END;
2567 			pdu_len = len;
2568 		} else {
2569 			sar = L2CAP_SAR_CONTINUE;
2570 		}
2571 	}
2572 
2573 	return 0;
2574 }
2575 
2576 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2577 						   struct msghdr *msg,
2578 						   size_t len, u16 sdulen)
2579 {
2580 	struct l2cap_conn *conn = chan->conn;
2581 	struct sk_buff *skb;
2582 	int err, count, hlen;
2583 	struct l2cap_hdr *lh;
2584 
2585 	BT_DBG("chan %p len %zu", chan, len);
2586 
2587 	if (!conn)
2588 		return ERR_PTR(-ENOTCONN);
2589 
2590 	hlen = L2CAP_HDR_SIZE;
2591 
2592 	if (sdulen)
2593 		hlen += L2CAP_SDULEN_SIZE;
2594 
2595 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2596 
2597 	skb = chan->ops->alloc_skb(chan, hlen, count,
2598 				   msg->msg_flags & MSG_DONTWAIT);
2599 	if (IS_ERR(skb))
2600 		return skb;
2601 
2602 	/* Create L2CAP header */
2603 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2604 	lh->cid = cpu_to_le16(chan->dcid);
2605 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2606 
2607 	if (sdulen)
2608 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2609 
2610 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2611 	if (unlikely(err < 0)) {
2612 		kfree_skb(skb);
2613 		return ERR_PTR(err);
2614 	}
2615 
2616 	return skb;
2617 }
2618 
2619 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2620 				struct sk_buff_head *seg_queue,
2621 				struct msghdr *msg, size_t len)
2622 {
2623 	struct sk_buff *skb;
2624 	size_t pdu_len;
2625 	u16 sdu_len;
2626 
2627 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2628 
2629 	sdu_len = len;
2630 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
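	/* For example, with len = 100 and remote_mps = 50 the SDU is sent as
	 * three PDUs: 48 data bytes following the 2-byte SDU length field,
	 * then 50 bytes, then the remaining 2 bytes.
	 */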
2631 
2632 	while (len > 0) {
2633 		if (len <= pdu_len)
2634 			pdu_len = len;
2635 
2636 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2637 		if (IS_ERR(skb)) {
2638 			__skb_queue_purge(seg_queue);
2639 			return PTR_ERR(skb);
2640 		}
2641 
2642 		__skb_queue_tail(seg_queue, skb);
2643 
2644 		len -= pdu_len;
2645 
2646 		if (sdu_len) {
2647 			sdu_len = 0;
2648 			pdu_len += L2CAP_SDULEN_SIZE;
2649 		}
2650 	}
2651 
2652 	return 0;
2653 }
2654 
2655 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2656 {
2657 	int sent = 0;
2658 
2659 	BT_DBG("chan %p", chan);
2660 
2661 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2662 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2663 		chan->tx_credits--;
2664 		sent++;
2665 	}
2666 
2667 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2668 	       skb_queue_len(&chan->tx_q));
2669 }
2670 
2671 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2672 {
2673 	struct sk_buff *skb;
2674 	int err;
2675 	struct sk_buff_head seg_queue;
2676 
2677 	if (!chan->conn)
2678 		return -ENOTCONN;
2679 
2680 	/* Connectionless channel */
2681 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2682 		skb = l2cap_create_connless_pdu(chan, msg, len);
2683 		if (IS_ERR(skb))
2684 			return PTR_ERR(skb);
2685 
2686 		/* Channel lock is released before requesting new skb and then
2687 		 * reacquired, so we need to recheck the channel state.
2688 		 */
2689 		if (chan->state != BT_CONNECTED) {
2690 			kfree_skb(skb);
2691 			return -ENOTCONN;
2692 		}
2693 
2694 		l2cap_do_send(chan, skb);
2695 		return len;
2696 	}
2697 
2698 	switch (chan->mode) {
2699 	case L2CAP_MODE_LE_FLOWCTL:
2700 	case L2CAP_MODE_EXT_FLOWCTL:
2701 		/* Check outgoing MTU */
2702 		if (len > chan->omtu)
2703 			return -EMSGSIZE;
2704 
2705 		__skb_queue_head_init(&seg_queue);
2706 
2707 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2708 
2709 		if (chan->state != BT_CONNECTED) {
2710 			__skb_queue_purge(&seg_queue);
2711 			err = -ENOTCONN;
2712 		}
2713 
2714 		if (err)
2715 			return err;
2716 
2717 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2718 
2719 		l2cap_le_flowctl_send(chan);
2720 
2721 		if (!chan->tx_credits)
2722 			chan->ops->suspend(chan);
2723 
2724 		err = len;
2725 
2726 		break;
2727 
2728 	case L2CAP_MODE_BASIC:
2729 		/* Check outgoing MTU */
2730 		if (len > chan->omtu)
2731 			return -EMSGSIZE;
2732 
2733 		/* Create a basic PDU */
2734 		skb = l2cap_create_basic_pdu(chan, msg, len);
2735 		if (IS_ERR(skb))
2736 			return PTR_ERR(skb);
2737 
2738 		/* Channel lock is released before requesting new skb and then
2739 		 * reacquired, so we need to recheck the channel state.
2740 		 */
2741 		if (chan->state != BT_CONNECTED) {
2742 			kfree_skb(skb);
2743 			return -ENOTCONN;
2744 		}
2745 
2746 		l2cap_do_send(chan, skb);
2747 		err = len;
2748 		break;
2749 
2750 	case L2CAP_MODE_ERTM:
2751 	case L2CAP_MODE_STREAMING:
2752 		/* Check outgoing MTU */
2753 		if (len > chan->omtu) {
2754 			err = -EMSGSIZE;
2755 			break;
2756 		}
2757 
2758 		__skb_queue_head_init(&seg_queue);
2759 
2760 		/* Do segmentation before calling in to the state machine,
2761 		 * since it's possible to block while waiting for memory
2762 		 * allocation.
2763 		 */
2764 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2765 
2766 		/* The channel could have been closed while segmenting,
2767 		 * check that it is still connected.
2768 		 */
2769 		if (chan->state != BT_CONNECTED) {
2770 			__skb_queue_purge(&seg_queue);
2771 			err = -ENOTCONN;
2772 		}
2773 
2774 		if (err)
2775 			break;
2776 
2777 		if (chan->mode == L2CAP_MODE_ERTM)
2778 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2779 		else
2780 			l2cap_streaming_send(chan, &seg_queue);
2781 
2782 		err = len;
2783 
2784 		/* If the skbs were not queued for sending, they'll still be in
2785 		 * seg_queue and need to be purged.
2786 		 */
2787 		__skb_queue_purge(&seg_queue);
2788 		break;
2789 
2790 	default:
2791 		BT_DBG("bad state %1.1x", chan->mode);
2792 		err = -EBADFD;
2793 	}
2794 
2795 	return err;
2796 }
2797 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2798 
2799 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2800 {
2801 	struct l2cap_ctrl control;
2802 	u16 seq;
2803 
2804 	BT_DBG("chan %p, txseq %u", chan, txseq);
2805 
2806 	memset(&control, 0, sizeof(control));
2807 	control.sframe = 1;
2808 	control.super = L2CAP_SUPER_SREJ;
2809 
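	/* For example, if expected_tx_seq is 3 and the received txseq is 6,
	 * SREJ frames are sent for sequence numbers 3, 4 and 5, skipping any
	 * frames already buffered in srej_q.
	 */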
2810 	for (seq = chan->expected_tx_seq; seq != txseq;
2811 	     seq = __next_seq(chan, seq)) {
2812 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2813 			control.reqseq = seq;
2814 			l2cap_send_sframe(chan, &control);
2815 			l2cap_seq_list_append(&chan->srej_list, seq);
2816 		}
2817 	}
2818 
2819 	chan->expected_tx_seq = __next_seq(chan, txseq);
2820 }
2821 
2822 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2823 {
2824 	struct l2cap_ctrl control;
2825 
2826 	BT_DBG("chan %p", chan);
2827 
2828 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2829 		return;
2830 
2831 	memset(&control, 0, sizeof(control));
2832 	control.sframe = 1;
2833 	control.super = L2CAP_SUPER_SREJ;
2834 	control.reqseq = chan->srej_list.tail;
2835 	l2cap_send_sframe(chan, &control);
2836 }
2837 
2838 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2839 {
2840 	struct l2cap_ctrl control;
2841 	u16 initial_head;
2842 	u16 seq;
2843 
2844 	BT_DBG("chan %p, txseq %u", chan, txseq);
2845 
2846 	memset(&control, 0, sizeof(control));
2847 	control.sframe = 1;
2848 	control.super = L2CAP_SUPER_SREJ;
2849 
2850 	/* Capture initial list head to allow only one pass through the list. */
2851 	initial_head = chan->srej_list.head;
2852 
2853 	do {
2854 		seq = l2cap_seq_list_pop(&chan->srej_list);
2855 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2856 			break;
2857 
2858 		control.reqseq = seq;
2859 		l2cap_send_sframe(chan, &control);
2860 		l2cap_seq_list_append(&chan->srej_list, seq);
2861 	} while (chan->srej_list.head != initial_head);
2862 }
2863 
2864 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2865 {
2866 	struct sk_buff *acked_skb;
2867 	u16 ackseq;
2868 
2869 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2870 
2871 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2872 		return;
2873 
2874 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2875 	       chan->expected_ack_seq, chan->unacked_frames);
2876 
2877 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2878 	     ackseq = __next_seq(chan, ackseq)) {
2879 
2880 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2881 		if (acked_skb) {
2882 			skb_unlink(acked_skb, &chan->tx_q);
2883 			kfree_skb(acked_skb);
2884 			chan->unacked_frames--;
2885 		}
2886 	}
2887 
2888 	chan->expected_ack_seq = reqseq;
2889 
2890 	if (chan->unacked_frames == 0)
2891 		__clear_retrans_timer(chan);
2892 
2893 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2894 }
2895 
2896 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2897 {
2898 	BT_DBG("chan %p", chan);
2899 
2900 	chan->expected_tx_seq = chan->buffer_seq;
2901 	l2cap_seq_list_clear(&chan->srej_list);
2902 	skb_queue_purge(&chan->srej_q);
2903 	chan->rx_state = L2CAP_RX_STATE_RECV;
2904 }
2905 
2906 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2907 				struct l2cap_ctrl *control,
2908 				struct sk_buff_head *skbs, u8 event)
2909 {
2910 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2911 	       event);
2912 
2913 	switch (event) {
2914 	case L2CAP_EV_DATA_REQUEST:
2915 		if (chan->tx_send_head == NULL)
2916 			chan->tx_send_head = skb_peek(skbs);
2917 
2918 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2919 		l2cap_ertm_send(chan);
2920 		break;
2921 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2922 		BT_DBG("Enter LOCAL_BUSY");
2923 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2924 
2925 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2926 			/* The SREJ_SENT state must be aborted if we are to
2927 			 * enter the LOCAL_BUSY state.
2928 			 */
2929 			l2cap_abort_rx_srej_sent(chan);
2930 		}
2931 
2932 		l2cap_send_ack(chan);
2933 
2934 		break;
2935 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2936 		BT_DBG("Exit LOCAL_BUSY");
2937 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2938 
2939 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2940 			struct l2cap_ctrl local_control;
2941 
2942 			memset(&local_control, 0, sizeof(local_control));
2943 			local_control.sframe = 1;
2944 			local_control.super = L2CAP_SUPER_RR;
2945 			local_control.poll = 1;
2946 			local_control.reqseq = chan->buffer_seq;
2947 			l2cap_send_sframe(chan, &local_control);
2948 
2949 			chan->retry_count = 1;
2950 			__set_monitor_timer(chan);
2951 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2952 		}
2953 		break;
2954 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2955 		l2cap_process_reqseq(chan, control->reqseq);
2956 		break;
2957 	case L2CAP_EV_EXPLICIT_POLL:
2958 		l2cap_send_rr_or_rnr(chan, 1);
2959 		chan->retry_count = 1;
2960 		__set_monitor_timer(chan);
2961 		__clear_ack_timer(chan);
2962 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2963 		break;
2964 	case L2CAP_EV_RETRANS_TO:
2965 		l2cap_send_rr_or_rnr(chan, 1);
2966 		chan->retry_count = 1;
2967 		__set_monitor_timer(chan);
2968 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2969 		break;
2970 	case L2CAP_EV_RECV_FBIT:
2971 		/* Nothing to process */
2972 		break;
2973 	default:
2974 		break;
2975 	}
2976 }
2977 
2978 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2979 				  struct l2cap_ctrl *control,
2980 				  struct sk_buff_head *skbs, u8 event)
2981 {
2982 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2983 	       event);
2984 
2985 	switch (event) {
2986 	case L2CAP_EV_DATA_REQUEST:
2987 		if (chan->tx_send_head == NULL)
2988 			chan->tx_send_head = skb_peek(skbs);
2989 		/* Queue data, but don't send. */
2990 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2991 		break;
2992 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2993 		BT_DBG("Enter LOCAL_BUSY");
2994 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2995 
2996 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2997 			/* The SREJ_SENT state must be aborted if we are to
2998 			 * enter the LOCAL_BUSY state.
2999 			 */
3000 			l2cap_abort_rx_srej_sent(chan);
3001 		}
3002 
3003 		l2cap_send_ack(chan);
3004 
3005 		break;
3006 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
3007 		BT_DBG("Exit LOCAL_BUSY");
3008 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3009 
3010 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
3011 			struct l2cap_ctrl local_control;
3012 			memset(&local_control, 0, sizeof(local_control));
3013 			local_control.sframe = 1;
3014 			local_control.super = L2CAP_SUPER_RR;
3015 			local_control.poll = 1;
3016 			local_control.reqseq = chan->buffer_seq;
3017 			l2cap_send_sframe(chan, &local_control);
3018 
3019 			chan->retry_count = 1;
3020 			__set_monitor_timer(chan);
3021 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
3022 		}
3023 		break;
3024 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
3025 		l2cap_process_reqseq(chan, control->reqseq);
3026 		fallthrough;
3027 
3028 	case L2CAP_EV_RECV_FBIT:
3029 		if (control && control->final) {
3030 			__clear_monitor_timer(chan);
3031 			if (chan->unacked_frames > 0)
3032 				__set_retrans_timer(chan);
3033 			chan->retry_count = 0;
3034 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3035 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3036 		}
3037 		break;
3038 	case L2CAP_EV_EXPLICIT_POLL:
3039 		/* Ignore */
3040 		break;
3041 	case L2CAP_EV_MONITOR_TO:
3042 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3043 			l2cap_send_rr_or_rnr(chan, 1);
3044 			__set_monitor_timer(chan);
3045 			chan->retry_count++;
3046 		} else {
3047 			l2cap_send_disconn_req(chan, ECONNABORTED);
3048 		}
3049 		break;
3050 	default:
3051 		break;
3052 	}
3053 }
3054 
3055 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3056 		     struct sk_buff_head *skbs, u8 event)
3057 {
3058 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3059 	       chan, control, skbs, event, chan->tx_state);
3060 
3061 	switch (chan->tx_state) {
3062 	case L2CAP_TX_STATE_XMIT:
3063 		l2cap_tx_state_xmit(chan, control, skbs, event);
3064 		break;
3065 	case L2CAP_TX_STATE_WAIT_F:
3066 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3067 		break;
3068 	default:
3069 		/* Ignore event */
3070 		break;
3071 	}
3072 }
3073 
3074 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3075 			     struct l2cap_ctrl *control)
3076 {
3077 	BT_DBG("chan %p, control %p", chan, control);
3078 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3079 }
3080 
3081 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3082 				  struct l2cap_ctrl *control)
3083 {
3084 	BT_DBG("chan %p, control %p", chan, control);
3085 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3086 }
3087 
3088 /* Copy frame to all raw sockets on that connection */
3089 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3090 {
3091 	struct sk_buff *nskb;
3092 	struct l2cap_chan *chan;
3093 
3094 	BT_DBG("conn %p", conn);
3095 
3096 	mutex_lock(&conn->chan_lock);
3097 
3098 	list_for_each_entry(chan, &conn->chan_l, list) {
3099 		if (chan->chan_type != L2CAP_CHAN_RAW)
3100 			continue;
3101 
3102 		/* Don't send frame to the channel it came from */
3103 		if (bt_cb(skb)->l2cap.chan == chan)
3104 			continue;
3105 
3106 		nskb = skb_clone(skb, GFP_KERNEL);
3107 		if (!nskb)
3108 			continue;
3109 		if (chan->ops->recv(chan, nskb))
3110 			kfree_skb(nskb);
3111 	}
3112 
3113 	mutex_unlock(&conn->chan_lock);
3114 }
3115 
3116 /* ---- L2CAP signalling commands ---- */
3117 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3118 				       u8 ident, u16 dlen, void *data)
3119 {
3120 	struct sk_buff *skb, **frag;
3121 	struct l2cap_cmd_hdr *cmd;
3122 	struct l2cap_hdr *lh;
3123 	int len, count;
3124 
3125 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3126 	       conn, code, ident, dlen);
3127 
3128 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3129 		return NULL;
3130 
3131 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3132 	count = min_t(unsigned int, conn->mtu, len);
3133 
3134 	skb = bt_skb_alloc(count, GFP_KERNEL);
3135 	if (!skb)
3136 		return NULL;
3137 
3138 	lh = skb_put(skb, L2CAP_HDR_SIZE);
3139 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3140 
3141 	if (conn->hcon->type == LE_LINK)
3142 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3143 	else
3144 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3145 
3146 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3147 	cmd->code  = code;
3148 	cmd->ident = ident;
3149 	cmd->len   = cpu_to_le16(dlen);
3150 
3151 	if (dlen) {
3152 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3153 		skb_put_data(skb, data, count);
3154 		data += count;
3155 	}
3156 
3157 	len -= skb->len;
3158 
3159 	/* Continuation fragments (no L2CAP header) */
3160 	frag = &skb_shinfo(skb)->frag_list;
3161 	while (len) {
3162 		count = min_t(unsigned int, conn->mtu, len);
3163 
3164 		*frag = bt_skb_alloc(count, GFP_KERNEL);
3165 		if (!*frag)
3166 			goto fail;
3167 
3168 		skb_put_data(*frag, data, count);
3169 
3170 		len  -= count;
3171 		data += count;
3172 
3173 		frag = &(*frag)->next;
3174 	}
3175 
3176 	return skb;
3177 
3178 fail:
3179 	kfree_skb(skb);
3180 	return NULL;
3181 }
3182 
3183 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3184 				     unsigned long *val)
3185 {
3186 	struct l2cap_conf_opt *opt = *ptr;
3187 	int len;
3188 
3189 	len = L2CAP_CONF_OPT_SIZE + opt->len;
3190 	*ptr += len;
3191 
3192 	*type = opt->type;
3193 	*olen = opt->len;
3194 
3195 	switch (opt->len) {
3196 	case 1:
3197 		*val = *((u8 *) opt->val);
3198 		break;
3199 
3200 	case 2:
3201 		*val = get_unaligned_le16(opt->val);
3202 		break;
3203 
3204 	case 4:
3205 		*val = get_unaligned_le32(opt->val);
3206 		break;
3207 
3208 	default:
3209 		*val = (unsigned long) opt->val;
3210 		break;
3211 	}
3212 
3213 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3214 	return len;
3215 }
3216 
3217 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3218 {
3219 	struct l2cap_conf_opt *opt = *ptr;
3220 
3221 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3222 
3223 	if (size < L2CAP_CONF_OPT_SIZE + len)
3224 		return;
3225 
3226 	opt->type = type;
3227 	opt->len  = len;
3228 
3229 	switch (len) {
3230 	case 1:
3231 		*((u8 *) opt->val)  = val;
3232 		break;
3233 
3234 	case 2:
3235 		put_unaligned_le16(val, opt->val);
3236 		break;
3237 
3238 	case 4:
3239 		put_unaligned_le32(val, opt->val);
3240 		break;
3241 
3242 	default:
3243 		memcpy(opt->val, (void *) val, len);
3244 		break;
3245 	}
3246 
3247 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3248 }
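
/* Each option is emitted in type/length/value form.  As an illustration,
 * adding the MTU option with l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2,
 * 672, ...) produces the four bytes 0x01 0x02 0xa0 0x02: type, length and
 * the value 672 in little-endian order.
 */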
3249 
3250 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3251 {
3252 	struct l2cap_conf_efs efs;
3253 
3254 	switch (chan->mode) {
3255 	case L2CAP_MODE_ERTM:
3256 		efs.id		= chan->local_id;
3257 		efs.stype	= chan->local_stype;
3258 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3259 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3260 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3261 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3262 		break;
3263 
3264 	case L2CAP_MODE_STREAMING:
3265 		efs.id		= 1;
3266 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3267 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3268 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3269 		efs.acc_lat	= 0;
3270 		efs.flush_to	= 0;
3271 		break;
3272 
3273 	default:
3274 		return;
3275 	}
3276 
3277 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3278 			   (unsigned long) &efs, size);
3279 }
3280 
3281 static void l2cap_ack_timeout(struct work_struct *work)
3282 {
3283 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3284 					       ack_timer.work);
3285 	u16 frames_to_ack;
3286 
3287 	BT_DBG("chan %p", chan);
3288 
3289 	l2cap_chan_lock(chan);
3290 
3291 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3292 				     chan->last_acked_seq);
3293 
3294 	if (frames_to_ack)
3295 		l2cap_send_rr_or_rnr(chan, 0);
3296 
3297 	l2cap_chan_unlock(chan);
3298 	l2cap_chan_put(chan);
3299 }
3300 
3301 int l2cap_ertm_init(struct l2cap_chan *chan)
3302 {
3303 	int err;
3304 
3305 	chan->next_tx_seq = 0;
3306 	chan->expected_tx_seq = 0;
3307 	chan->expected_ack_seq = 0;
3308 	chan->unacked_frames = 0;
3309 	chan->buffer_seq = 0;
3310 	chan->frames_sent = 0;
3311 	chan->last_acked_seq = 0;
3312 	chan->sdu = NULL;
3313 	chan->sdu_last_frag = NULL;
3314 	chan->sdu_len = 0;
3315 
3316 	skb_queue_head_init(&chan->tx_q);
3317 
3318 	chan->local_amp_id = AMP_ID_BREDR;
3319 	chan->move_id = AMP_ID_BREDR;
3320 	chan->move_state = L2CAP_MOVE_STABLE;
3321 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3322 
3323 	if (chan->mode != L2CAP_MODE_ERTM)
3324 		return 0;
3325 
3326 	chan->rx_state = L2CAP_RX_STATE_RECV;
3327 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3328 
3329 	skb_queue_head_init(&chan->srej_q);
3330 
3331 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3332 	if (err < 0)
3333 		return err;
3334 
3335 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3336 	if (err < 0)
3337 		l2cap_seq_list_free(&chan->srej_list);
3338 
3339 	return err;
3340 }
3341 
3342 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3343 {
3344 	switch (mode) {
3345 	case L2CAP_MODE_STREAMING:
3346 	case L2CAP_MODE_ERTM:
3347 		if (l2cap_mode_supported(mode, remote_feat_mask))
3348 			return mode;
3349 		fallthrough;
3350 	default:
3351 		return L2CAP_MODE_BASIC;
3352 	}
3353 }
3354 
3355 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3356 {
3357 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3358 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3359 }
3360 
3361 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3362 {
3363 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3364 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3365 }
3366 
3367 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3368 				      struct l2cap_conf_rfc *rfc)
3369 {
3370 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3371 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3372 
3373 		/* Class 1 devices must have ERTM timeouts
3374 		 * exceeding the Link Supervision Timeout.  The
3375 		 * default Link Supervision Timeout for AMP
3376 		 * controllers is 10 seconds.
3377 		 *
3378 		 * Class 1 devices use 0xffffffff for their
3379 		 * best-effort flush timeout, so the clamping logic
3380 		 * will result in a timeout that meets the above
3381 		 * requirement.  ERTM timeouts are 16-bit values, so
3382 		 * the maximum timeout is 65.535 seconds.
3383 		 */
3384 
3385 		/* Convert timeout to milliseconds and round */
3386 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3387 
3388 		/* This is the recommended formula for class 2 devices
3389 		 * that start ERTM timers when packets are sent to the
3390 		 * controller.
3391 		 */
3392 		ertm_to = 3 * ertm_to + 500;
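		/* For example, a best-effort flush timeout of 100000 us gives
		 * ertm_to = 100 ms above, so the resulting timeout here is
		 * 3 * 100 + 500 = 800 ms.
		 */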
3393 
3394 		if (ertm_to > 0xffff)
3395 			ertm_to = 0xffff;
3396 
3397 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3398 		rfc->monitor_timeout = rfc->retrans_timeout;
3399 	} else {
3400 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3401 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3402 	}
3403 }
3404 
3405 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3406 {
3407 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3408 	    __l2cap_ews_supported(chan->conn)) {
3409 		/* use extended control field */
3410 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3411 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3412 	} else {
3413 		chan->tx_win = min_t(u16, chan->tx_win,
3414 				     L2CAP_DEFAULT_TX_WINDOW);
3415 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3416 	}
3417 	chan->ack_win = chan->tx_win;
3418 }
3419 
3420 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3421 {
3422 	struct hci_conn *conn = chan->conn->hcon;
3423 
3424 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3425 
3426 	/* The 2-DH1 packet has between 2 and 56 information bytes
3427 	 * (including the 2-byte payload header)
3428 	 */
3429 	if (!(conn->pkt_type & HCI_2DH1))
3430 		chan->imtu = 54;
3431 
3432 	/* The 3-DH1 packet has between 2 and 85 information bytes
3433 	 * (including the 2-byte payload header)
3434 	 */
3435 	if (!(conn->pkt_type & HCI_3DH1))
3436 		chan->imtu = 83;
3437 
3438 	/* The 2-DH3 packet has between 2 and 369 information bytes
3439 	 * (including the 2-byte payload header)
3440 	 */
3441 	if (!(conn->pkt_type & HCI_2DH3))
3442 		chan->imtu = 367;
3443 
3444 	/* The 3-DH3 packet has between 2 and 554 information bytes
3445 	 * (including the 2-byte payload header)
3446 	 */
3447 	if (!(conn->pkt_type & HCI_3DH3))
3448 		chan->imtu = 552;
3449 
3450 	/* The 2-DH5 packet has between 2 and 681 information bytes
3451 	 * (including the 2-byte payload header)
3452 	 */
3453 	if (!(conn->pkt_type & HCI_2DH5))
3454 		chan->imtu = 679;
3455 
3456 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3457 	 * (including the 2-byte payload header)
3458 	 */
3459 	if (!(conn->pkt_type & HCI_3DH5))
3460 		chan->imtu = 1021;
3461 }
3462 
3463 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3464 {
3465 	struct l2cap_conf_req *req = data;
3466 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3467 	void *ptr = req->data;
3468 	void *endptr = data + data_size;
3469 	u16 size;
3470 
3471 	BT_DBG("chan %p", chan);
3472 
3473 	if (chan->num_conf_req || chan->num_conf_rsp)
3474 		goto done;
3475 
3476 	switch (chan->mode) {
3477 	case L2CAP_MODE_STREAMING:
3478 	case L2CAP_MODE_ERTM:
3479 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3480 			break;
3481 
3482 		if (__l2cap_efs_supported(chan->conn))
3483 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3484 
3485 		fallthrough;
3486 	default:
3487 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3488 		break;
3489 	}
3490 
3491 done:
3492 	if (chan->imtu != L2CAP_DEFAULT_MTU) {
3493 		if (!chan->imtu)
3494 			l2cap_mtu_auto(chan);
3495 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3496 				   endptr - ptr);
3497 	}
3498 
3499 	switch (chan->mode) {
3500 	case L2CAP_MODE_BASIC:
3501 		if (disable_ertm)
3502 			break;
3503 
3504 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3505 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3506 			break;
3507 
3508 		rfc.mode            = L2CAP_MODE_BASIC;
3509 		rfc.txwin_size      = 0;
3510 		rfc.max_transmit    = 0;
3511 		rfc.retrans_timeout = 0;
3512 		rfc.monitor_timeout = 0;
3513 		rfc.max_pdu_size    = 0;
3514 
3515 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3516 				   (unsigned long) &rfc, endptr - ptr);
3517 		break;
3518 
3519 	case L2CAP_MODE_ERTM:
3520 		rfc.mode            = L2CAP_MODE_ERTM;
3521 		rfc.max_transmit    = chan->max_tx;
3522 
3523 		__l2cap_set_ertm_timeouts(chan, &rfc);
3524 
3525 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3526 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3527 			     L2CAP_FCS_SIZE);
3528 		rfc.max_pdu_size = cpu_to_le16(size);
3529 
3530 		l2cap_txwin_setup(chan);
3531 
3532 		rfc.txwin_size = min_t(u16, chan->tx_win,
3533 				       L2CAP_DEFAULT_TX_WINDOW);
3534 
3535 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3536 				   (unsigned long) &rfc, endptr - ptr);
3537 
3538 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3539 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3540 
3541 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3542 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3543 					   chan->tx_win, endptr - ptr);
3544 
3545 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3546 			if (chan->fcs == L2CAP_FCS_NONE ||
3547 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3548 				chan->fcs = L2CAP_FCS_NONE;
3549 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3550 						   chan->fcs, endptr - ptr);
3551 			}
3552 		break;
3553 
3554 	case L2CAP_MODE_STREAMING:
3555 		l2cap_txwin_setup(chan);
3556 		rfc.mode            = L2CAP_MODE_STREAMING;
3557 		rfc.txwin_size      = 0;
3558 		rfc.max_transmit    = 0;
3559 		rfc.retrans_timeout = 0;
3560 		rfc.monitor_timeout = 0;
3561 
3562 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3563 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3564 			     L2CAP_FCS_SIZE);
3565 		rfc.max_pdu_size = cpu_to_le16(size);
3566 
3567 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3568 				   (unsigned long) &rfc, endptr - ptr);
3569 
3570 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3571 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3572 
3573 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3574 			if (chan->fcs == L2CAP_FCS_NONE ||
3575 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3576 				chan->fcs = L2CAP_FCS_NONE;
3577 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3578 						   chan->fcs, endptr - ptr);
3579 			}
3580 		break;
3581 	}
3582 
3583 	req->dcid  = cpu_to_le16(chan->dcid);
3584 	req->flags = cpu_to_le16(0);
3585 
3586 	return ptr - data;
3587 }
3588 
3589 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3590 {
3591 	struct l2cap_conf_rsp *rsp = data;
3592 	void *ptr = rsp->data;
3593 	void *endptr = data + data_size;
3594 	void *req = chan->conf_req;
3595 	int len = chan->conf_len;
3596 	int type, hint, olen;
3597 	unsigned long val;
3598 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3599 	struct l2cap_conf_efs efs;
3600 	u8 remote_efs = 0;
3601 	u16 mtu = L2CAP_DEFAULT_MTU;
3602 	u16 result = L2CAP_CONF_SUCCESS;
3603 	u16 size;
3604 
3605 	BT_DBG("chan %p", chan);
3606 
3607 	while (len >= L2CAP_CONF_OPT_SIZE) {
3608 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3609 		if (len < 0)
3610 			break;
3611 
3612 		hint  = type & L2CAP_CONF_HINT;
3613 		type &= L2CAP_CONF_MASK;
3614 
3615 		switch (type) {
3616 		case L2CAP_CONF_MTU:
3617 			if (olen != 2)
3618 				break;
3619 			mtu = val;
3620 			break;
3621 
3622 		case L2CAP_CONF_FLUSH_TO:
3623 			if (olen != 2)
3624 				break;
3625 			chan->flush_to = val;
3626 			break;
3627 
3628 		case L2CAP_CONF_QOS:
3629 			break;
3630 
3631 		case L2CAP_CONF_RFC:
3632 			if (olen != sizeof(rfc))
3633 				break;
3634 			memcpy(&rfc, (void *) val, olen);
3635 			break;
3636 
3637 		case L2CAP_CONF_FCS:
3638 			if (olen != 1)
3639 				break;
3640 			if (val == L2CAP_FCS_NONE)
3641 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3642 			break;
3643 
3644 		case L2CAP_CONF_EFS:
3645 			if (olen != sizeof(efs))
3646 				break;
3647 			remote_efs = 1;
3648 			memcpy(&efs, (void *) val, olen);
3649 			break;
3650 
3651 		case L2CAP_CONF_EWS:
3652 			if (olen != 2)
3653 				break;
3654 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3655 				return -ECONNREFUSED;
3656 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3657 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3658 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3659 			chan->remote_tx_win = val;
3660 			break;
3661 
3662 		default:
3663 			if (hint)
3664 				break;
3665 			result = L2CAP_CONF_UNKNOWN;
3666 			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
3667 			break;
3668 		}
3669 	}
3670 
3671 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3672 		goto done;
3673 
3674 	switch (chan->mode) {
3675 	case L2CAP_MODE_STREAMING:
3676 	case L2CAP_MODE_ERTM:
3677 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3678 			chan->mode = l2cap_select_mode(rfc.mode,
3679 						       chan->conn->feat_mask);
3680 			break;
3681 		}
3682 
3683 		if (remote_efs) {
3684 			if (__l2cap_efs_supported(chan->conn))
3685 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3686 			else
3687 				return -ECONNREFUSED;
3688 		}
3689 
3690 		if (chan->mode != rfc.mode)
3691 			return -ECONNREFUSED;
3692 
3693 		break;
3694 	}
3695 
3696 done:
3697 	if (chan->mode != rfc.mode) {
3698 		result = L2CAP_CONF_UNACCEPT;
3699 		rfc.mode = chan->mode;
3700 
3701 		if (chan->num_conf_rsp == 1)
3702 			return -ECONNREFUSED;
3703 
3704 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3705 				   (unsigned long) &rfc, endptr - ptr);
3706 	}
3707 
3708 	if (result == L2CAP_CONF_SUCCESS) {
3709 		/* Configure output options and let the other side know
3710 		 * which ones we don't like. */
3711 
3712 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3713 			result = L2CAP_CONF_UNACCEPT;
3714 		else {
3715 			chan->omtu = mtu;
3716 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3717 		}
3718 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3719 
3720 		if (remote_efs) {
3721 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3722 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3723 			    efs.stype != chan->local_stype) {
3724 
3725 				result = L2CAP_CONF_UNACCEPT;
3726 
3727 				if (chan->num_conf_req >= 1)
3728 					return -ECONNREFUSED;
3729 
3730 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3731 						   sizeof(efs),
3732 						   (unsigned long) &efs, endptr - ptr);
3733 			} else {
3734 				/* Send PENDING Conf Rsp */
3735 				result = L2CAP_CONF_PENDING;
3736 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3737 			}
3738 		}
3739 
3740 		switch (rfc.mode) {
3741 		case L2CAP_MODE_BASIC:
3742 			chan->fcs = L2CAP_FCS_NONE;
3743 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3744 			break;
3745 
3746 		case L2CAP_MODE_ERTM:
3747 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3748 				chan->remote_tx_win = rfc.txwin_size;
3749 			else
3750 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3751 
3752 			chan->remote_max_tx = rfc.max_transmit;
3753 
3754 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3755 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3756 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3757 			rfc.max_pdu_size = cpu_to_le16(size);
3758 			chan->remote_mps = size;
3759 
3760 			__l2cap_set_ertm_timeouts(chan, &rfc);
3761 
3762 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3763 
3764 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3765 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3766 
3767 			if (remote_efs &&
3768 			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3769 				chan->remote_id = efs.id;
3770 				chan->remote_stype = efs.stype;
3771 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3772 				chan->remote_flush_to =
3773 					le32_to_cpu(efs.flush_to);
3774 				chan->remote_acc_lat =
3775 					le32_to_cpu(efs.acc_lat);
3776 				chan->remote_sdu_itime =
3777 					le32_to_cpu(efs.sdu_itime);
3778 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3779 						   sizeof(efs),
3780 						   (unsigned long) &efs, endptr - ptr);
3781 			}
3782 			break;
3783 
3784 		case L2CAP_MODE_STREAMING:
3785 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3786 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3787 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3788 			rfc.max_pdu_size = cpu_to_le16(size);
3789 			chan->remote_mps = size;
3790 
3791 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3792 
3793 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3794 					   (unsigned long) &rfc, endptr - ptr);
3795 
3796 			break;
3797 
3798 		default:
3799 			result = L2CAP_CONF_UNACCEPT;
3800 
3801 			memset(&rfc, 0, sizeof(rfc));
3802 			rfc.mode = chan->mode;
3803 		}
3804 
3805 		if (result == L2CAP_CONF_SUCCESS)
3806 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3807 	}
3808 	rsp->scid   = cpu_to_le16(chan->dcid);
3809 	rsp->result = cpu_to_le16(result);
3810 	rsp->flags  = cpu_to_le16(0);
3811 
3812 	return ptr - data;
3813 }
3814 
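/* Parse a Configuration Response from the peer and build a new Configuration
 * Request in @data that adopts the values the peer asked for.  Returns the
 * request length or a negative error if the proposed settings are unacceptable.
 */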
3815 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3816 				void *data, size_t size, u16 *result)
3817 {
3818 	struct l2cap_conf_req *req = data;
3819 	void *ptr = req->data;
3820 	void *endptr = data + size;
3821 	int type, olen;
3822 	unsigned long val;
3823 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3824 	struct l2cap_conf_efs efs;
3825 
3826 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3827 
3828 	while (len >= L2CAP_CONF_OPT_SIZE) {
3829 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3830 		if (len < 0)
3831 			break;
3832 
3833 		switch (type) {
3834 		case L2CAP_CONF_MTU:
3835 			if (olen != 2)
3836 				break;
3837 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3838 				*result = L2CAP_CONF_UNACCEPT;
3839 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3840 			} else
3841 				chan->imtu = val;
3842 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3843 					   endptr - ptr);
3844 			break;
3845 
3846 		case L2CAP_CONF_FLUSH_TO:
3847 			if (olen != 2)
3848 				break;
3849 			chan->flush_to = val;
3850 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3851 					   chan->flush_to, endptr - ptr);
3852 			break;
3853 
3854 		case L2CAP_CONF_RFC:
3855 			if (olen != sizeof(rfc))
3856 				break;
3857 			memcpy(&rfc, (void *)val, olen);
3858 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3859 			    rfc.mode != chan->mode)
3860 				return -ECONNREFUSED;
3861 			chan->fcs = 0;
3862 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3863 					   (unsigned long) &rfc, endptr - ptr);
3864 			break;
3865 
3866 		case L2CAP_CONF_EWS:
3867 			if (olen != 2)
3868 				break;
3869 			chan->ack_win = min_t(u16, val, chan->ack_win);
3870 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3871 					   chan->tx_win, endptr - ptr);
3872 			break;
3873 
3874 		case L2CAP_CONF_EFS:
3875 			if (olen != sizeof(efs))
3876 				break;
3877 			memcpy(&efs, (void *)val, olen);
3878 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3879 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3880 			    efs.stype != chan->local_stype)
3881 				return -ECONNREFUSED;
3882 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3883 					   (unsigned long) &efs, endptr - ptr);
3884 			break;
3885 
3886 		case L2CAP_CONF_FCS:
3887 			if (olen != 1)
3888 				break;
3889 			if (*result == L2CAP_CONF_PENDING)
3890 				if (val == L2CAP_FCS_NONE)
3891 					set_bit(CONF_RECV_NO_FCS,
3892 						&chan->conf_state);
3893 			break;
3894 		}
3895 	}
3896 
3897 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3898 		return -ECONNREFUSED;
3899 
3900 	chan->mode = rfc.mode;
3901 
3902 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3903 		switch (rfc.mode) {
3904 		case L2CAP_MODE_ERTM:
3905 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3906 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3907 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3908 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3909 				chan->ack_win = min_t(u16, chan->ack_win,
3910 						      rfc.txwin_size);
3911 
3912 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3913 				chan->local_msdu = le16_to_cpu(efs.msdu);
3914 				chan->local_sdu_itime =
3915 					le32_to_cpu(efs.sdu_itime);
3916 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3917 				chan->local_flush_to =
3918 					le32_to_cpu(efs.flush_to);
3919 			}
3920 			break;
3921 
3922 		case L2CAP_MODE_STREAMING:
3923 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3924 		}
3925 	}
3926 
3927 	req->dcid   = cpu_to_le16(chan->dcid);
3928 	req->flags  = cpu_to_le16(0);
3929 
3930 	return ptr - data;
3931 }
3932 
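/* Build a bare Configuration Response carrying only the given result and
 * flags, without any options.
 */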
3933 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3934 				u16 result, u16 flags)
3935 {
3936 	struct l2cap_conf_rsp *rsp = data;
3937 	void *ptr = rsp->data;
3938 
3939 	BT_DBG("chan %p", chan);
3940 
3941 	rsp->scid   = cpu_to_le16(chan->dcid);
3942 	rsp->result = cpu_to_le16(result);
3943 	rsp->flags  = cpu_to_le16(flags);
3944 
3945 	return ptr - data;
3946 }
3947 
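/* Send the LE credit based connection response that was deferred while
 * waiting for the upper layer to accept the channel.
 */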
3948 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3949 {
3950 	struct l2cap_le_conn_rsp rsp;
3951 	struct l2cap_conn *conn = chan->conn;
3952 
3953 	BT_DBG("chan %p", chan);
3954 
3955 	rsp.dcid    = cpu_to_le16(chan->scid);
3956 	rsp.mtu     = cpu_to_le16(chan->imtu);
3957 	rsp.mps     = cpu_to_le16(chan->mps);
3958 	rsp.credits = cpu_to_le16(chan->rx_credits);
3959 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3960 
3961 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3962 		       &rsp);
3963 }
3964 
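/* Send a single deferred enhanced credit based connection response covering
 * every channel that is still pending under the same command ident.
 */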
3965 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3966 {
3967 	struct {
3968 		struct l2cap_ecred_conn_rsp rsp;
3969 		__le16 dcid[5];
3970 	} __packed pdu;
3971 	struct l2cap_conn *conn = chan->conn;
3972 	u16 ident = chan->ident;
3973 	int i = 0;
3974 
3975 	if (!ident)
3976 		return;
3977 
3978 	BT_DBG("chan %p ident %d", chan, ident);
3979 
3980 	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3981 	pdu.rsp.mps     = cpu_to_le16(chan->mps);
3982 	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3983 	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3984 
3985 	mutex_lock(&conn->chan_lock);
3986 
3987 	list_for_each_entry(chan, &conn->chan_l, list) {
3988 		if (chan->ident != ident)
3989 			continue;
3990 
3991 		/* Reset ident so only one response is sent */
3992 		chan->ident = 0;
3993 
3994 		/* Include all channels pending with the same ident */
3995 		pdu.dcid[i++] = cpu_to_le16(chan->scid);
3996 	}
3997 
3998 	mutex_unlock(&conn->chan_lock);
3999 
4000 	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
4001 			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
4002 }
4003 
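/* Send the deferred BR/EDR connection (or AMP create channel) response and,
 * if no Configuration Request has gone out yet, start configuration.
 */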
4004 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4005 {
4006 	struct l2cap_conn_rsp rsp;
4007 	struct l2cap_conn *conn = chan->conn;
4008 	u8 buf[128];
4009 	u8 rsp_code;
4010 
4011 	rsp.scid   = cpu_to_le16(chan->dcid);
4012 	rsp.dcid   = cpu_to_le16(chan->scid);
4013 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4014 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4015 
4016 	if (chan->hs_hcon)
4017 		rsp_code = L2CAP_CREATE_CHAN_RSP;
4018 	else
4019 		rsp_code = L2CAP_CONN_RSP;
4020 
4021 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4022 
4023 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
4024 
4025 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4026 		return;
4027 
4028 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4029 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4030 	chan->num_conf_req++;
4031 }
4032 
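/* Pull the negotiated ERTM/streaming parameters out of a successful
 * Configuration Response and apply them to the channel.
 */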
4033 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4034 {
4035 	int type, olen;
4036 	unsigned long val;
4037 	/* Use sane default values in case a misbehaving remote device
4038 	 * did not send an RFC or extended window size option.
4039 	 */
4040 	u16 txwin_ext = chan->ack_win;
4041 	struct l2cap_conf_rfc rfc = {
4042 		.mode = chan->mode,
4043 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4044 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4045 		.max_pdu_size = cpu_to_le16(chan->imtu),
4046 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4047 	};
4048 
4049 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4050 
4051 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4052 		return;
4053 
4054 	while (len >= L2CAP_CONF_OPT_SIZE) {
4055 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4056 		if (len < 0)
4057 			break;
4058 
4059 		switch (type) {
4060 		case L2CAP_CONF_RFC:
4061 			if (olen != sizeof(rfc))
4062 				break;
4063 			memcpy(&rfc, (void *)val, olen);
4064 			break;
4065 		case L2CAP_CONF_EWS:
4066 			if (olen != 2)
4067 				break;
4068 			txwin_ext = val;
4069 			break;
4070 		}
4071 	}
4072 
4073 	switch (rfc.mode) {
4074 	case L2CAP_MODE_ERTM:
4075 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4076 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4077 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
4078 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4079 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4080 		else
4081 			chan->ack_win = min_t(u16, chan->ack_win,
4082 					      rfc.txwin_size);
4083 		break;
4084 	case L2CAP_MODE_STREAMING:
4085 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
4086 	}
4087 }
4088 
4089 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4090 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4091 				    u8 *data)
4092 {
4093 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4094 
4095 	if (cmd_len < sizeof(*rej))
4096 		return -EPROTO;
4097 
4098 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4099 		return 0;
4100 
4101 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4102 	    cmd->ident == conn->info_ident) {
4103 		cancel_delayed_work(&conn->info_timer);
4104 
4105 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4106 		conn->info_ident = 0;
4107 
4108 		l2cap_conn_start(conn);
4109 	}
4110 
4111 	return 0;
4112 }
4113 
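/* Handle an incoming Connection Request (or AMP Create Channel Request):
 * look up a listening channel for the PSM, perform security and CID checks,
 * create the new channel and send the appropriate response.
 */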
4114 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4115 					struct l2cap_cmd_hdr *cmd,
4116 					u8 *data, u8 rsp_code, u8 amp_id)
4117 {
4118 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4119 	struct l2cap_conn_rsp rsp;
4120 	struct l2cap_chan *chan = NULL, *pchan;
4121 	int result, status = L2CAP_CS_NO_INFO;
4122 
4123 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4124 	__le16 psm = req->psm;
4125 
4126 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4127 
4128 	/* Check if we have a socket listening on this psm */
4129 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4130 					 &conn->hcon->dst, ACL_LINK);
4131 	if (!pchan) {
4132 		result = L2CAP_CR_BAD_PSM;
4133 		goto sendresp;
4134 	}
4135 
4136 	mutex_lock(&conn->chan_lock);
4137 	l2cap_chan_lock(pchan);
4138 
4139 	/* Check if the ACL is secure enough (if not SDP) */
4140 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4141 	    !hci_conn_check_link_mode(conn->hcon)) {
4142 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4143 		result = L2CAP_CR_SEC_BLOCK;
4144 		goto response;
4145 	}
4146 
4147 	result = L2CAP_CR_NO_MEM;
4148 
4149 	/* Check for valid dynamic CID range (as per Erratum 3253) */
4150 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4151 		result = L2CAP_CR_INVALID_SCID;
4152 		goto response;
4153 	}
4154 
4155 	/* Check if we already have a channel with that dcid */
4156 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
4157 		result = L2CAP_CR_SCID_IN_USE;
4158 		goto response;
4159 	}
4160 
4161 	chan = pchan->ops->new_connection(pchan);
4162 	if (!chan)
4163 		goto response;
4164 
4165 	/* For certain devices (ex: HID mouse), support for authentication,
4166 	 * pairing and bonding is optional. For such devices, in order to avoid
4167 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
4168 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4169 	 */
4170 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4171 
4172 	bacpy(&chan->src, &conn->hcon->src);
4173 	bacpy(&chan->dst, &conn->hcon->dst);
4174 	chan->src_type = bdaddr_src_type(conn->hcon);
4175 	chan->dst_type = bdaddr_dst_type(conn->hcon);
4176 	chan->psm  = psm;
4177 	chan->dcid = scid;
4178 	chan->local_amp_id = amp_id;
4179 
4180 	__l2cap_chan_add(conn, chan);
4181 
4182 	dcid = chan->scid;
4183 
4184 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4185 
4186 	chan->ident = cmd->ident;
4187 
4188 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4189 		if (l2cap_chan_check_security(chan, false)) {
4190 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4191 				l2cap_state_change(chan, BT_CONNECT2);
4192 				result = L2CAP_CR_PEND;
4193 				status = L2CAP_CS_AUTHOR_PEND;
4194 				chan->ops->defer(chan);
4195 			} else {
4196 				/* Force pending result for AMP controllers.
4197 				 * The connection will succeed after the
4198 				 * physical link is up.
4199 				 */
4200 				if (amp_id == AMP_ID_BREDR) {
4201 					l2cap_state_change(chan, BT_CONFIG);
4202 					result = L2CAP_CR_SUCCESS;
4203 				} else {
4204 					l2cap_state_change(chan, BT_CONNECT2);
4205 					result = L2CAP_CR_PEND;
4206 				}
4207 				status = L2CAP_CS_NO_INFO;
4208 			}
4209 		} else {
4210 			l2cap_state_change(chan, BT_CONNECT2);
4211 			result = L2CAP_CR_PEND;
4212 			status = L2CAP_CS_AUTHEN_PEND;
4213 		}
4214 	} else {
4215 		l2cap_state_change(chan, BT_CONNECT2);
4216 		result = L2CAP_CR_PEND;
4217 		status = L2CAP_CS_NO_INFO;
4218 	}
4219 
4220 response:
4221 	l2cap_chan_unlock(pchan);
4222 	mutex_unlock(&conn->chan_lock);
4223 	l2cap_chan_put(pchan);
4224 
4225 sendresp:
4226 	rsp.scid   = cpu_to_le16(scid);
4227 	rsp.dcid   = cpu_to_le16(dcid);
4228 	rsp.result = cpu_to_le16(result);
4229 	rsp.status = cpu_to_le16(status);
4230 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4231 
4232 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4233 		struct l2cap_info_req info;
4234 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4235 
4236 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4237 		conn->info_ident = l2cap_get_ident(conn);
4238 
4239 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4240 
4241 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4242 			       sizeof(info), &info);
4243 	}
4244 
4245 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4246 	    result == L2CAP_CR_SUCCESS) {
4247 		u8 buf[128];
4248 		set_bit(CONF_REQ_SENT, &chan->conf_state);
4249 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4250 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4251 		chan->num_conf_req++;
4252 	}
4253 
4254 	return chan;
4255 }
4256 
4257 static int l2cap_connect_req(struct l2cap_conn *conn,
4258 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4259 {
4260 	struct hci_dev *hdev = conn->hcon->hdev;
4261 	struct hci_conn *hcon = conn->hcon;
4262 
4263 	if (cmd_len < sizeof(struct l2cap_conn_req))
4264 		return -EPROTO;
4265 
4266 	hci_dev_lock(hdev);
4267 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4268 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4269 		mgmt_device_connected(hdev, hcon, NULL, 0);
4270 	hci_dev_unlock(hdev);
4271 
4272 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4273 	return 0;
4274 }
4275 
4276 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4277 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4278 				    u8 *data)
4279 {
4280 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4281 	u16 scid, dcid, result, status;
4282 	struct l2cap_chan *chan;
4283 	u8 req[128];
4284 	int err;
4285 
4286 	if (cmd_len < sizeof(*rsp))
4287 		return -EPROTO;
4288 
4289 	scid   = __le16_to_cpu(rsp->scid);
4290 	dcid   = __le16_to_cpu(rsp->dcid);
4291 	result = __le16_to_cpu(rsp->result);
4292 	status = __le16_to_cpu(rsp->status);
4293 
4294 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4295 	       dcid, scid, result, status);
4296 
4297 	mutex_lock(&conn->chan_lock);
4298 
4299 	if (scid) {
4300 		chan = __l2cap_get_chan_by_scid(conn, scid);
4301 		if (!chan) {
4302 			err = -EBADSLT;
4303 			goto unlock;
4304 		}
4305 	} else {
4306 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4307 		if (!chan) {
4308 			err = -EBADSLT;
4309 			goto unlock;
4310 		}
4311 	}
4312 
4313 	chan = l2cap_chan_hold_unless_zero(chan);
4314 	if (!chan) {
4315 		err = -EBADSLT;
4316 		goto unlock;
4317 	}
4318 
4319 	err = 0;
4320 
4321 	l2cap_chan_lock(chan);
4322 
4323 	switch (result) {
4324 	case L2CAP_CR_SUCCESS:
4325 		l2cap_state_change(chan, BT_CONFIG);
4326 		chan->ident = 0;
4327 		chan->dcid = dcid;
4328 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4329 
4330 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4331 			break;
4332 
4333 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4334 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4335 		chan->num_conf_req++;
4336 		break;
4337 
4338 	case L2CAP_CR_PEND:
4339 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4340 		break;
4341 
4342 	default:
4343 		l2cap_chan_del(chan, ECONNREFUSED);
4344 		break;
4345 	}
4346 
4347 	l2cap_chan_unlock(chan);
4348 	l2cap_chan_put(chan);
4349 
4350 unlock:
4351 	mutex_unlock(&conn->chan_lock);
4352 
4353 	return err;
4354 }
4355 
4356 static inline void set_default_fcs(struct l2cap_chan *chan)
4357 {
4358 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4359 	 * sides request it.
4360 	 */
4361 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4362 		chan->fcs = L2CAP_FCS_NONE;
4363 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4364 		chan->fcs = L2CAP_FCS_CRC16;
4365 }
4366 
4367 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4368 				    u8 ident, u16 flags)
4369 {
4370 	struct l2cap_conn *conn = chan->conn;
4371 
4372 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4373 	       flags);
4374 
4375 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4376 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4377 
4378 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4379 		       l2cap_build_conf_rsp(chan, data,
4380 					    L2CAP_CONF_SUCCESS, flags), data);
4381 }
4382 
4383 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4384 				   u16 scid, u16 dcid)
4385 {
4386 	struct l2cap_cmd_rej_cid rej;
4387 
4388 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4389 	rej.scid = __cpu_to_le16(scid);
4390 	rej.dcid = __cpu_to_le16(dcid);
4391 
4392 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4393 }
4394 
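/* Handle an incoming Configuration Request.  Fragments flagged with the
 * continuation bit are accumulated in chan->conf_req; once the request is
 * complete it is parsed and the response (and, if needed, our own request)
 * is sent.
 */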
4395 static inline int l2cap_config_req(struct l2cap_conn *conn,
4396 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4397 				   u8 *data)
4398 {
4399 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4400 	u16 dcid, flags;
4401 	u8 rsp[64];
4402 	struct l2cap_chan *chan;
4403 	int len, err = 0;
4404 
4405 	if (cmd_len < sizeof(*req))
4406 		return -EPROTO;
4407 
4408 	dcid  = __le16_to_cpu(req->dcid);
4409 	flags = __le16_to_cpu(req->flags);
4410 
4411 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4412 
4413 	chan = l2cap_get_chan_by_scid(conn, dcid);
4414 	if (!chan) {
4415 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4416 		return 0;
4417 	}
4418 
4419 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4420 	    chan->state != BT_CONNECTED) {
4421 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4422 				       chan->dcid);
4423 		goto unlock;
4424 	}
4425 
4426 	/* Reject if config buffer is too small. */
4427 	len = cmd_len - sizeof(*req);
4428 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4429 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4430 			       l2cap_build_conf_rsp(chan, rsp,
4431 			       L2CAP_CONF_REJECT, flags), rsp);
4432 		goto unlock;
4433 	}
4434 
4435 	/* Store config. */
4436 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4437 	chan->conf_len += len;
4438 
4439 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4440 		/* Incomplete config. Send empty response. */
4441 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4442 			       l2cap_build_conf_rsp(chan, rsp,
4443 			       L2CAP_CONF_SUCCESS, flags), rsp);
4444 		goto unlock;
4445 	}
4446 
4447 	/* Complete config. */
4448 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4449 	if (len < 0) {
4450 		l2cap_send_disconn_req(chan, ECONNRESET);
4451 		goto unlock;
4452 	}
4453 
4454 	chan->ident = cmd->ident;
4455 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4456 	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4457 		chan->num_conf_rsp++;
4458 
4459 	/* Reset config buffer. */
4460 	chan->conf_len = 0;
4461 
4462 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4463 		goto unlock;
4464 
4465 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4466 		set_default_fcs(chan);
4467 
4468 		if (chan->mode == L2CAP_MODE_ERTM ||
4469 		    chan->mode == L2CAP_MODE_STREAMING)
4470 			err = l2cap_ertm_init(chan);
4471 
4472 		if (err < 0)
4473 			l2cap_send_disconn_req(chan, -err);
4474 		else
4475 			l2cap_chan_ready(chan);
4476 
4477 		goto unlock;
4478 	}
4479 
4480 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4481 		u8 buf[64];
4482 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4483 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4484 		chan->num_conf_req++;
4485 	}
4486 
4487 	/* Got Conf Rsp PENDING from remote side and assume we sent
4488 	 * Conf Rsp PENDING in the code above */
4489 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4490 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4491 
4492 		/* check compatibility */
4493 
4494 		/* Send rsp for BR/EDR channel */
4495 		if (!chan->hs_hcon)
4496 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4497 		else
4498 			chan->ident = cmd->ident;
4499 	}
4500 
4501 unlock:
4502 	l2cap_chan_unlock(chan);
4503 	l2cap_chan_put(chan);
4504 	return err;
4505 }
4506 
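/* Handle an incoming Configuration Response.  Unacceptable or unknown
 * options trigger another Configuration Request; once both directions are
 * configured the channel is made ready.
 */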
4507 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4508 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4509 				   u8 *data)
4510 {
4511 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4512 	u16 scid, flags, result;
4513 	struct l2cap_chan *chan;
4514 	int len = cmd_len - sizeof(*rsp);
4515 	int err = 0;
4516 
4517 	if (cmd_len < sizeof(*rsp))
4518 		return -EPROTO;
4519 
4520 	scid   = __le16_to_cpu(rsp->scid);
4521 	flags  = __le16_to_cpu(rsp->flags);
4522 	result = __le16_to_cpu(rsp->result);
4523 
4524 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4525 	       result, len);
4526 
4527 	chan = l2cap_get_chan_by_scid(conn, scid);
4528 	if (!chan)
4529 		return 0;
4530 
4531 	switch (result) {
4532 	case L2CAP_CONF_SUCCESS:
4533 		l2cap_conf_rfc_get(chan, rsp->data, len);
4534 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4535 		break;
4536 
4537 	case L2CAP_CONF_PENDING:
4538 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4539 
4540 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4541 			char buf[64];
4542 
4543 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4544 						   buf, sizeof(buf), &result);
4545 			if (len < 0) {
4546 				l2cap_send_disconn_req(chan, ECONNRESET);
4547 				goto done;
4548 			}
4549 
4550 			if (!chan->hs_hcon) {
4551 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4552 							0);
4553 			} else {
4554 				if (l2cap_check_efs(chan)) {
4555 					amp_create_logical_link(chan);
4556 					chan->ident = cmd->ident;
4557 				}
4558 			}
4559 		}
4560 		goto done;
4561 
4562 	case L2CAP_CONF_UNKNOWN:
4563 	case L2CAP_CONF_UNACCEPT:
4564 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4565 			char req[64];
4566 
4567 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4568 				l2cap_send_disconn_req(chan, ECONNRESET);
4569 				goto done;
4570 			}
4571 
4572 			/* throw out any old stored conf requests */
4573 			result = L2CAP_CONF_SUCCESS;
4574 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4575 						   req, sizeof(req), &result);
4576 			if (len < 0) {
4577 				l2cap_send_disconn_req(chan, ECONNRESET);
4578 				goto done;
4579 			}
4580 
4581 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4582 				       L2CAP_CONF_REQ, len, req);
4583 			chan->num_conf_req++;
4584 			if (result != L2CAP_CONF_SUCCESS)
4585 				goto done;
4586 			break;
4587 		}
4588 		fallthrough;
4589 
4590 	default:
4591 		l2cap_chan_set_err(chan, ECONNRESET);
4592 
4593 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4594 		l2cap_send_disconn_req(chan, ECONNRESET);
4595 		goto done;
4596 	}
4597 
4598 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4599 		goto done;
4600 
4601 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4602 
4603 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4604 		set_default_fcs(chan);
4605 
4606 		if (chan->mode == L2CAP_MODE_ERTM ||
4607 		    chan->mode == L2CAP_MODE_STREAMING)
4608 			err = l2cap_ertm_init(chan);
4609 
4610 		if (err < 0)
4611 			l2cap_send_disconn_req(chan, -err);
4612 		else
4613 			l2cap_chan_ready(chan);
4614 	}
4615 
4616 done:
4617 	l2cap_chan_unlock(chan);
4618 	l2cap_chan_put(chan);
4619 	return err;
4620 }
4621 
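/* Handle an incoming Disconnection Request: acknowledge it and tear the
 * matching channel down.
 */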
4622 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4623 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4624 				       u8 *data)
4625 {
4626 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4627 	struct l2cap_disconn_rsp rsp;
4628 	u16 dcid, scid;
4629 	struct l2cap_chan *chan;
4630 
4631 	if (cmd_len != sizeof(*req))
4632 		return -EPROTO;
4633 
4634 	scid = __le16_to_cpu(req->scid);
4635 	dcid = __le16_to_cpu(req->dcid);
4636 
4637 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4638 
4639 	mutex_lock(&conn->chan_lock);
4640 
4641 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4642 	if (!chan) {
4643 		mutex_unlock(&conn->chan_lock);
4644 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4645 		return 0;
4646 	}
4647 
4648 	l2cap_chan_hold(chan);
4649 	l2cap_chan_lock(chan);
4650 
4651 	rsp.dcid = cpu_to_le16(chan->scid);
4652 	rsp.scid = cpu_to_le16(chan->dcid);
4653 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4654 
4655 	chan->ops->set_shutdown(chan);
4656 
4657 	l2cap_chan_del(chan, ECONNRESET);
4658 
4659 	chan->ops->close(chan);
4660 
4661 	l2cap_chan_unlock(chan);
4662 	l2cap_chan_put(chan);
4663 
4664 	mutex_unlock(&conn->chan_lock);
4665 
4666 	return 0;
4667 }
4668 
4669 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4670 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4671 				       u8 *data)
4672 {
4673 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4674 	u16 dcid, scid;
4675 	struct l2cap_chan *chan;
4676 
4677 	if (cmd_len != sizeof(*rsp))
4678 		return -EPROTO;
4679 
4680 	scid = __le16_to_cpu(rsp->scid);
4681 	dcid = __le16_to_cpu(rsp->dcid);
4682 
4683 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4684 
4685 	mutex_lock(&conn->chan_lock);
4686 
4687 	chan = __l2cap_get_chan_by_scid(conn, scid);
4688 	if (!chan) {
4689 		mutex_unlock(&conn->chan_lock);
4690 		return 0;
4691 	}
4692 
4693 	l2cap_chan_hold(chan);
4694 	l2cap_chan_lock(chan);
4695 
4696 	if (chan->state != BT_DISCONN) {
4697 		l2cap_chan_unlock(chan);
4698 		l2cap_chan_put(chan);
4699 		mutex_unlock(&conn->chan_lock);
4700 		return 0;
4701 	}
4702 
4703 	l2cap_chan_del(chan, 0);
4704 
4705 	chan->ops->close(chan);
4706 
4707 	l2cap_chan_unlock(chan);
4708 	l2cap_chan_put(chan);
4709 
4710 	mutex_unlock(&conn->chan_lock);
4711 
4712 	return 0;
4713 }
4714 
4715 static inline int l2cap_information_req(struct l2cap_conn *conn,
4716 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4717 					u8 *data)
4718 {
4719 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4720 	u16 type;
4721 
4722 	if (cmd_len != sizeof(*req))
4723 		return -EPROTO;
4724 
4725 	type = __le16_to_cpu(req->type);
4726 
4727 	BT_DBG("type 0x%4.4x", type);
4728 
4729 	if (type == L2CAP_IT_FEAT_MASK) {
4730 		u8 buf[8];
4731 		u32 feat_mask = l2cap_feat_mask;
4732 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4733 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4734 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4735 		if (!disable_ertm)
4736 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4737 				| L2CAP_FEAT_FCS;
4738 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4739 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4740 				| L2CAP_FEAT_EXT_WINDOW;
4741 
4742 		put_unaligned_le32(feat_mask, rsp->data);
4743 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4744 			       buf);
4745 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4746 		u8 buf[12];
4747 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4748 
4749 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4750 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4751 		rsp->data[0] = conn->local_fixed_chan;
4752 		memset(rsp->data + 1, 0, 7);
4753 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4754 			       buf);
4755 	} else {
4756 		struct l2cap_info_rsp rsp;
4757 		rsp.type   = cpu_to_le16(type);
4758 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4759 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4760 			       &rsp);
4761 	}
4762 
4763 	return 0;
4764 }
4765 
4766 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4767 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4768 					u8 *data)
4769 {
4770 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4771 	u16 type, result;
4772 
4773 	if (cmd_len < sizeof(*rsp))
4774 		return -EPROTO;
4775 
4776 	type   = __le16_to_cpu(rsp->type);
4777 	result = __le16_to_cpu(rsp->result);
4778 
4779 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4780 
4781 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
4782 	if (cmd->ident != conn->info_ident ||
4783 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4784 		return 0;
4785 
4786 	cancel_delayed_work(&conn->info_timer);
4787 
4788 	if (result != L2CAP_IR_SUCCESS) {
4789 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4790 		conn->info_ident = 0;
4791 
4792 		l2cap_conn_start(conn);
4793 
4794 		return 0;
4795 	}
4796 
4797 	switch (type) {
4798 	case L2CAP_IT_FEAT_MASK:
4799 		conn->feat_mask = get_unaligned_le32(rsp->data);
4800 
4801 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4802 			struct l2cap_info_req req;
4803 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4804 
4805 			conn->info_ident = l2cap_get_ident(conn);
4806 
4807 			l2cap_send_cmd(conn, conn->info_ident,
4808 				       L2CAP_INFO_REQ, sizeof(req), &req);
4809 		} else {
4810 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4811 			conn->info_ident = 0;
4812 
4813 			l2cap_conn_start(conn);
4814 		}
4815 		break;
4816 
4817 	case L2CAP_IT_FIXED_CHAN:
4818 		conn->remote_fixed_chan = rsp->data[0];
4819 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4820 		conn->info_ident = 0;
4821 
4822 		l2cap_conn_start(conn);
4823 		break;
4824 	}
4825 
4826 	return 0;
4827 }
4828 
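/* Handle an AMP Create Channel Request: validate the requested controller id
 * and either fall back to a plain BR/EDR connection or set the channel up on
 * the AMP controller.
 */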
4829 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4830 				    struct l2cap_cmd_hdr *cmd,
4831 				    u16 cmd_len, void *data)
4832 {
4833 	struct l2cap_create_chan_req *req = data;
4834 	struct l2cap_create_chan_rsp rsp;
4835 	struct l2cap_chan *chan;
4836 	struct hci_dev *hdev;
4837 	u16 psm, scid;
4838 
4839 	if (cmd_len != sizeof(*req))
4840 		return -EPROTO;
4841 
4842 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4843 		return -EINVAL;
4844 
4845 	psm = le16_to_cpu(req->psm);
4846 	scid = le16_to_cpu(req->scid);
4847 
4848 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4849 
4850 	/* For controller id 0 make BR/EDR connection */
4851 	if (req->amp_id == AMP_ID_BREDR) {
4852 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4853 			      req->amp_id);
4854 		return 0;
4855 	}
4856 
4857 	/* Validate AMP controller id */
4858 	hdev = hci_dev_get(req->amp_id);
4859 	if (!hdev)
4860 		goto error;
4861 
4862 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4863 		hci_dev_put(hdev);
4864 		goto error;
4865 	}
4866 
4867 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4868 			     req->amp_id);
4869 	if (chan) {
4870 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4871 		struct hci_conn *hs_hcon;
4872 
4873 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4874 						  &conn->hcon->dst);
4875 		if (!hs_hcon) {
4876 			hci_dev_put(hdev);
4877 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4878 					       chan->dcid);
4879 			return 0;
4880 		}
4881 
4882 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4883 
4884 		mgr->bredr_chan = chan;
4885 		chan->hs_hcon = hs_hcon;
4886 		chan->fcs = L2CAP_FCS_NONE;
4887 		conn->mtu = hdev->block_mtu;
4888 	}
4889 
4890 	hci_dev_put(hdev);
4891 
4892 	return 0;
4893 
4894 error:
4895 	rsp.dcid = 0;
4896 	rsp.scid = cpu_to_le16(scid);
4897 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4898 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4899 
4900 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4901 		       sizeof(rsp), &rsp);
4902 
4903 	return 0;
4904 }
4905 
4906 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4907 {
4908 	struct l2cap_move_chan_req req;
4909 	u8 ident;
4910 
4911 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4912 
4913 	ident = l2cap_get_ident(chan->conn);
4914 	chan->ident = ident;
4915 
4916 	req.icid = cpu_to_le16(chan->scid);
4917 	req.dest_amp_id = dest_amp_id;
4918 
4919 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4920 		       &req);
4921 
4922 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4923 }
4924 
4925 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4926 {
4927 	struct l2cap_move_chan_rsp rsp;
4928 
4929 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4930 
4931 	rsp.icid = cpu_to_le16(chan->dcid);
4932 	rsp.result = cpu_to_le16(result);
4933 
4934 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4935 		       sizeof(rsp), &rsp);
4936 }
4937 
4938 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4939 {
4940 	struct l2cap_move_chan_cfm cfm;
4941 
4942 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4943 
4944 	chan->ident = l2cap_get_ident(chan->conn);
4945 
4946 	cfm.icid = cpu_to_le16(chan->scid);
4947 	cfm.result = cpu_to_le16(result);
4948 
4949 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4950 		       sizeof(cfm), &cfm);
4951 
4952 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4953 }
4954 
4955 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4956 {
4957 	struct l2cap_move_chan_cfm cfm;
4958 
4959 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4960 
4961 	cfm.icid = cpu_to_le16(icid);
4962 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4963 
4964 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4965 		       sizeof(cfm), &cfm);
4966 }
4967 
4968 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4969 					 u16 icid)
4970 {
4971 	struct l2cap_move_chan_cfm_rsp rsp;
4972 
4973 	BT_DBG("icid 0x%4.4x", icid);
4974 
4975 	rsp.icid = cpu_to_le16(icid);
4976 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4977 }
4978 
4979 static void __release_logical_link(struct l2cap_chan *chan)
4980 {
4981 	chan->hs_hchan = NULL;
4982 	chan->hs_hcon = NULL;
4983 
4984 	/* Placeholder - release the logical link */
4985 }
4986 
4987 static void l2cap_logical_fail(struct l2cap_chan *chan)
4988 {
4989 	/* Logical link setup failed */
4990 	if (chan->state != BT_CONNECTED) {
4991 		/* Create channel failure, disconnect */
4992 		l2cap_send_disconn_req(chan, ECONNRESET);
4993 		return;
4994 	}
4995 
4996 	switch (chan->move_role) {
4997 	case L2CAP_MOVE_ROLE_RESPONDER:
4998 		l2cap_move_done(chan);
4999 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
5000 		break;
5001 	case L2CAP_MOVE_ROLE_INITIATOR:
5002 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
5003 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
5004 			/* Remote has only sent pending or
5005 			 * success responses, clean up
5006 			 */
5007 			l2cap_move_done(chan);
5008 		}
5009 
5010 		/* Other amp move states imply that the move
5011 		 * has already aborted
5012 		 */
5013 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5014 		break;
5015 	}
5016 }
5017 
5018 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
5019 					struct hci_chan *hchan)
5020 {
5021 	struct l2cap_conf_rsp rsp;
5022 
5023 	chan->hs_hchan = hchan;
5024 	chan->hs_hcon->l2cap_data = chan->conn;
5025 
5026 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
5027 
5028 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
5029 		int err;
5030 
5031 		set_default_fcs(chan);
5032 
5033 		err = l2cap_ertm_init(chan);
5034 		if (err < 0)
5035 			l2cap_send_disconn_req(chan, -err);
5036 		else
5037 			l2cap_chan_ready(chan);
5038 	}
5039 }
5040 
5041 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5042 				      struct hci_chan *hchan)
5043 {
5044 	chan->hs_hcon = hchan->conn;
5045 	chan->hs_hcon->l2cap_data = chan->conn;
5046 
5047 	BT_DBG("move_state %d", chan->move_state);
5048 
5049 	switch (chan->move_state) {
5050 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5051 		/* Move confirm will be sent after a success
5052 		 * response is received
5053 		 */
5054 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5055 		break;
5056 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
5057 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5058 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5059 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5060 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5061 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5062 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5063 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5064 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5065 		}
5066 		break;
5067 	default:
5068 		/* Move was not in expected state, free the channel */
5069 		__release_logical_link(chan);
5070 
5071 		chan->move_state = L2CAP_MOVE_STABLE;
5072 	}
5073 }
5074 
5075 /* Call with chan locked */
5076 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5077 		       u8 status)
5078 {
5079 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5080 
5081 	if (status) {
5082 		l2cap_logical_fail(chan);
5083 		__release_logical_link(chan);
5084 		return;
5085 	}
5086 
5087 	if (chan->state != BT_CONNECTED) {
5088 		/* Ignore logical link if channel is on BR/EDR */
5089 		if (chan->local_amp_id != AMP_ID_BREDR)
5090 			l2cap_logical_finish_create(chan, hchan);
5091 	} else {
5092 		l2cap_logical_finish_move(chan, hchan);
5093 	}
5094 }
5095 
5096 void l2cap_move_start(struct l2cap_chan *chan)
5097 {
5098 	BT_DBG("chan %p", chan);
5099 
5100 	if (chan->local_amp_id == AMP_ID_BREDR) {
5101 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5102 			return;
5103 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5104 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5105 		/* Placeholder - start physical link setup */
5106 	} else {
5107 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5108 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5109 		chan->move_id = 0;
5110 		l2cap_move_setup(chan);
5111 		l2cap_send_move_chan_req(chan, 0);
5112 	}
5113 }
5114 
5115 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5116 			    u8 local_amp_id, u8 remote_amp_id)
5117 {
5118 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5119 	       local_amp_id, remote_amp_id);
5120 
5121 	chan->fcs = L2CAP_FCS_NONE;
5122 
5123 	/* Outgoing channel on AMP */
5124 	if (chan->state == BT_CONNECT) {
5125 		if (result == L2CAP_CR_SUCCESS) {
5126 			chan->local_amp_id = local_amp_id;
5127 			l2cap_send_create_chan_req(chan, remote_amp_id);
5128 		} else {
5129 			/* Revert to BR/EDR connect */
5130 			l2cap_send_conn_req(chan);
5131 		}
5132 
5133 		return;
5134 	}
5135 
5136 	/* Incoming channel on AMP */
5137 	if (__l2cap_no_conn_pending(chan)) {
5138 		struct l2cap_conn_rsp rsp;
5139 		char buf[128];
5140 		rsp.scid = cpu_to_le16(chan->dcid);
5141 		rsp.dcid = cpu_to_le16(chan->scid);
5142 
5143 		if (result == L2CAP_CR_SUCCESS) {
5144 			/* Send successful response */
5145 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5146 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5147 		} else {
5148 			/* Send negative response */
5149 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5150 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5151 		}
5152 
5153 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
5154 			       sizeof(rsp), &rsp);
5155 
5156 		if (result == L2CAP_CR_SUCCESS) {
5157 			l2cap_state_change(chan, BT_CONFIG);
5158 			set_bit(CONF_REQ_SENT, &chan->conf_state);
5159 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5160 				       L2CAP_CONF_REQ,
5161 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5162 			chan->num_conf_req++;
5163 		}
5164 	}
5165 }
5166 
5167 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5168 				   u8 remote_amp_id)
5169 {
5170 	l2cap_move_setup(chan);
5171 	chan->move_id = local_amp_id;
5172 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5173 
5174 	l2cap_send_move_chan_req(chan, remote_amp_id);
5175 }
5176 
5177 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5178 {
5179 	struct hci_chan *hchan = NULL;
5180 
5181 	/* Placeholder - get hci_chan for logical link */
5182 
5183 	if (hchan) {
5184 		if (hchan->state == BT_CONNECTED) {
5185 			/* Logical link is ready to go */
5186 			chan->hs_hcon = hchan->conn;
5187 			chan->hs_hcon->l2cap_data = chan->conn;
5188 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5189 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5190 
5191 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5192 		} else {
5193 			/* Wait for logical link to be ready */
5194 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5195 		}
5196 	} else {
5197 		/* Logical link not available */
5198 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5199 	}
5200 }
5201 
5202 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5203 {
5204 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5205 		u8 rsp_result;
5206 		if (result == -EINVAL)
5207 			rsp_result = L2CAP_MR_BAD_ID;
5208 		else
5209 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5210 
5211 		l2cap_send_move_chan_rsp(chan, rsp_result);
5212 	}
5213 
5214 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5215 	chan->move_state = L2CAP_MOVE_STABLE;
5216 
5217 	/* Restart data transmission */
5218 	l2cap_ertm_send(chan);
5219 }
5220 
5221 /* Invoke with locked chan */
5222 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5223 {
5224 	u8 local_amp_id = chan->local_amp_id;
5225 	u8 remote_amp_id = chan->remote_amp_id;
5226 
5227 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5228 	       chan, result, local_amp_id, remote_amp_id);
5229 
5230 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5231 		return;
5232 
5233 	if (chan->state != BT_CONNECTED) {
5234 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5235 	} else if (result != L2CAP_MR_SUCCESS) {
5236 		l2cap_do_move_cancel(chan, result);
5237 	} else {
5238 		switch (chan->move_role) {
5239 		case L2CAP_MOVE_ROLE_INITIATOR:
5240 			l2cap_do_move_initiate(chan, local_amp_id,
5241 					       remote_amp_id);
5242 			break;
5243 		case L2CAP_MOVE_ROLE_RESPONDER:
5244 			l2cap_do_move_respond(chan, result);
5245 			break;
5246 		default:
5247 			l2cap_do_move_cancel(chan, result);
5248 			break;
5249 		}
5250 	}
5251 }
5252 
5253 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5254 					 struct l2cap_cmd_hdr *cmd,
5255 					 u16 cmd_len, void *data)
5256 {
5257 	struct l2cap_move_chan_req *req = data;
5258 	struct l2cap_move_chan_rsp rsp;
5259 	struct l2cap_chan *chan;
5260 	u16 icid = 0;
5261 	u16 result = L2CAP_MR_NOT_ALLOWED;
5262 
5263 	if (cmd_len != sizeof(*req))
5264 		return -EPROTO;
5265 
5266 	icid = le16_to_cpu(req->icid);
5267 
5268 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5269 
5270 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5271 		return -EINVAL;
5272 
5273 	chan = l2cap_get_chan_by_dcid(conn, icid);
5274 	if (!chan) {
5275 		rsp.icid = cpu_to_le16(icid);
5276 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5277 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5278 			       sizeof(rsp), &rsp);
5279 		return 0;
5280 	}
5281 
5282 	chan->ident = cmd->ident;
5283 
5284 	if (chan->scid < L2CAP_CID_DYN_START ||
5285 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5286 	    (chan->mode != L2CAP_MODE_ERTM &&
5287 	     chan->mode != L2CAP_MODE_STREAMING)) {
5288 		result = L2CAP_MR_NOT_ALLOWED;
5289 		goto send_move_response;
5290 	}
5291 
5292 	if (chan->local_amp_id == req->dest_amp_id) {
5293 		result = L2CAP_MR_SAME_ID;
5294 		goto send_move_response;
5295 	}
5296 
5297 	if (req->dest_amp_id != AMP_ID_BREDR) {
5298 		struct hci_dev *hdev;
5299 		hdev = hci_dev_get(req->dest_amp_id);
5300 		if (!hdev || hdev->dev_type != HCI_AMP ||
5301 		    !test_bit(HCI_UP, &hdev->flags)) {
5302 			if (hdev)
5303 				hci_dev_put(hdev);
5304 
5305 			result = L2CAP_MR_BAD_ID;
5306 			goto send_move_response;
5307 		}
5308 		hci_dev_put(hdev);
5309 	}
5310 
5311 	/* Detect a move collision.  Only send a collision response
5312 	 * if this side has "lost", otherwise proceed with the move.
5313 	 * The winner has the larger bd_addr.
5314 	 */
5315 	if ((__chan_is_moving(chan) ||
5316 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5317 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5318 		result = L2CAP_MR_COLLISION;
5319 		goto send_move_response;
5320 	}
5321 
5322 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5323 	l2cap_move_setup(chan);
5324 	chan->move_id = req->dest_amp_id;
5325 
5326 	if (req->dest_amp_id == AMP_ID_BREDR) {
5327 		/* Moving to BR/EDR */
5328 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5329 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5330 			result = L2CAP_MR_PEND;
5331 		} else {
5332 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5333 			result = L2CAP_MR_SUCCESS;
5334 		}
5335 	} else {
5336 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5337 		/* Placeholder - uncomment when amp functions are available */
5338 		/*amp_accept_physical(chan, req->dest_amp_id);*/
5339 		result = L2CAP_MR_PEND;
5340 	}
5341 
5342 send_move_response:
5343 	l2cap_send_move_chan_rsp(chan, result);
5344 
5345 	l2cap_chan_unlock(chan);
5346 	l2cap_chan_put(chan);
5347 
5348 	return 0;
5349 }
5350 
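/* Continue an in-progress channel move after a successful or pending Move
 * Channel Response, advancing the move state machine for this channel.
 */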
5351 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5352 {
5353 	struct l2cap_chan *chan;
5354 	struct hci_chan *hchan = NULL;
5355 
5356 	chan = l2cap_get_chan_by_scid(conn, icid);
5357 	if (!chan) {
5358 		l2cap_send_move_chan_cfm_icid(conn, icid);
5359 		return;
5360 	}
5361 
5362 	__clear_chan_timer(chan);
5363 	if (result == L2CAP_MR_PEND)
5364 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5365 
5366 	switch (chan->move_state) {
5367 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5368 		/* Move confirm will be sent when logical link
5369 		 * is complete.
5370 		 */
5371 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5372 		break;
5373 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5374 		if (result == L2CAP_MR_PEND) {
5375 			break;
5376 		} else if (test_bit(CONN_LOCAL_BUSY,
5377 				    &chan->conn_state)) {
5378 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5379 		} else {
5380 			/* Logical link is up or moving to BR/EDR,
5381 			 * proceed with move
5382 			 */
5383 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5384 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5385 		}
5386 		break;
5387 	case L2CAP_MOVE_WAIT_RSP:
5388 		/* Moving to AMP */
5389 		if (result == L2CAP_MR_SUCCESS) {
5390 			/* Remote is ready, send confirm immediately
5391 			 * after logical link is ready
5392 			 */
5393 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5394 		} else {
5395 			/* Both logical link and move success
5396 			 * are required to confirm
5397 			 */
5398 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5399 		}
5400 
5401 		/* Placeholder - get hci_chan for logical link */
5402 		if (!hchan) {
5403 			/* Logical link not available */
5404 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5405 			break;
5406 		}
5407 
5408 		/* If the logical link is not yet connected, do not
5409 		 * send confirmation.
5410 		 */
5411 		if (hchan->state != BT_CONNECTED)
5412 			break;
5413 
5414 		/* Logical link is already ready to go */
5415 
5416 		chan->hs_hcon = hchan->conn;
5417 		chan->hs_hcon->l2cap_data = chan->conn;
5418 
5419 		if (result == L2CAP_MR_SUCCESS) {
5420 			/* Can confirm now */
5421 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5422 		} else {
5423 			/* Now only need move success
5424 			 * to confirm
5425 			 */
5426 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5427 		}
5428 
5429 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5430 		break;
5431 	default:
5432 		/* Any other amp move state means the move failed. */
5433 		chan->move_id = chan->local_amp_id;
5434 		l2cap_move_done(chan);
5435 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5436 	}
5437 
5438 	l2cap_chan_unlock(chan);
5439 	l2cap_chan_put(chan);
5440 }
5441 
5442 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5443 			    u16 result)
5444 {
5445 	struct l2cap_chan *chan;
5446 
5447 	chan = l2cap_get_chan_by_ident(conn, ident);
5448 	if (!chan) {
5449 		/* Could not locate channel, icid is best guess */
5450 		l2cap_send_move_chan_cfm_icid(conn, icid);
5451 		return;
5452 	}
5453 
5454 	__clear_chan_timer(chan);
5455 
5456 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5457 		if (result == L2CAP_MR_COLLISION) {
5458 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5459 		} else {
5460 			/* Cleanup - cancel move */
5461 			chan->move_id = chan->local_amp_id;
5462 			l2cap_move_done(chan);
5463 		}
5464 	}
5465 
5466 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5467 
5468 	l2cap_chan_unlock(chan);
5469 	l2cap_chan_put(chan);
5470 }
5471 
5472 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5473 				  struct l2cap_cmd_hdr *cmd,
5474 				  u16 cmd_len, void *data)
5475 {
5476 	struct l2cap_move_chan_rsp *rsp = data;
5477 	u16 icid, result;
5478 
5479 	if (cmd_len != sizeof(*rsp))
5480 		return -EPROTO;
5481 
5482 	icid = le16_to_cpu(rsp->icid);
5483 	result = le16_to_cpu(rsp->result);
5484 
5485 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5486 
5487 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5488 		l2cap_move_continue(conn, icid, result);
5489 	else
5490 		l2cap_move_fail(conn, cmd->ident, icid, result);
5491 
5492 	return 0;
5493 }
5494 
5495 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5496 				      struct l2cap_cmd_hdr *cmd,
5497 				      u16 cmd_len, void *data)
5498 {
5499 	struct l2cap_move_chan_cfm *cfm = data;
5500 	struct l2cap_chan *chan;
5501 	u16 icid, result;
5502 
5503 	if (cmd_len != sizeof(*cfm))
5504 		return -EPROTO;
5505 
5506 	icid = le16_to_cpu(cfm->icid);
5507 	result = le16_to_cpu(cfm->result);
5508 
5509 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5510 
5511 	chan = l2cap_get_chan_by_dcid(conn, icid);
5512 	if (!chan) {
5513 		/* Spec requires a response even if the icid was not found */
5514 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5515 		return 0;
5516 	}
5517 
5518 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5519 		if (result == L2CAP_MC_CONFIRMED) {
5520 			chan->local_amp_id = chan->move_id;
5521 			if (chan->local_amp_id == AMP_ID_BREDR)
5522 				__release_logical_link(chan);
5523 		} else {
5524 			chan->move_id = chan->local_amp_id;
5525 		}
5526 
5527 		l2cap_move_done(chan);
5528 	}
5529 
5530 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5531 
5532 	l2cap_chan_unlock(chan);
5533 	l2cap_chan_put(chan);
5534 
5535 	return 0;
5536 }
5537 
5538 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5539 						 struct l2cap_cmd_hdr *cmd,
5540 						 u16 cmd_len, void *data)
5541 {
5542 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5543 	struct l2cap_chan *chan;
5544 	u16 icid;
5545 
5546 	if (cmd_len != sizeof(*rsp))
5547 		return -EPROTO;
5548 
5549 	icid = le16_to_cpu(rsp->icid);
5550 
5551 	BT_DBG("icid 0x%4.4x", icid);
5552 
5553 	chan = l2cap_get_chan_by_scid(conn, icid);
5554 	if (!chan)
5555 		return 0;
5556 
5557 	__clear_chan_timer(chan);
5558 
5559 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5560 		chan->local_amp_id = chan->move_id;
5561 
5562 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5563 			__release_logical_link(chan);
5564 
5565 		l2cap_move_done(chan);
5566 	}
5567 
5568 	l2cap_chan_unlock(chan);
5569 	l2cap_chan_put(chan);
5570 
5571 	return 0;
5572 }
5573 
5574 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5575 					      struct l2cap_cmd_hdr *cmd,
5576 					      u16 cmd_len, u8 *data)
5577 {
5578 	struct hci_conn *hcon = conn->hcon;
5579 	struct l2cap_conn_param_update_req *req;
5580 	struct l2cap_conn_param_update_rsp rsp;
5581 	u16 min, max, latency, to_multiplier;
5582 	int err;
5583 
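	/* The Connection Parameter Update Request is only sent to the LE
	 * central; if we are not in that role, the error return below makes
	 * the caller answer with a command reject.
	 */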
5584 	if (hcon->role != HCI_ROLE_MASTER)
5585 		return -EINVAL;
5586 
5587 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5588 		return -EPROTO;
5589 
5590 	req = (struct l2cap_conn_param_update_req *) data;
5591 	min		= __le16_to_cpu(req->min);
5592 	max		= __le16_to_cpu(req->max);
5593 	latency		= __le16_to_cpu(req->latency);
5594 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5595 
5596 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5597 	       min, max, latency, to_multiplier);
5598 
5599 	memset(&rsp, 0, sizeof(rsp));
5600 
5601 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5602 	if (err)
5603 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5604 	else
5605 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5606 
5607 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5608 		       sizeof(rsp), &rsp);
5609 
5610 	if (!err) {
5611 		u8 store_hint;
5612 
5613 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5614 						to_multiplier);
5615 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5616 				    store_hint, min, max, latency,
5617 				    to_multiplier);
5618 
5619 	}
5620 
5621 	return 0;
5622 }
5623 
5624 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5625 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5626 				u8 *data)
5627 {
5628 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5629 	struct hci_conn *hcon = conn->hcon;
5630 	u16 dcid, mtu, mps, credits, result;
5631 	struct l2cap_chan *chan;
5632 	int err, sec_level;
5633 
5634 	if (cmd_len < sizeof(*rsp))
5635 		return -EPROTO;
5636 
5637 	dcid    = __le16_to_cpu(rsp->dcid);
5638 	mtu     = __le16_to_cpu(rsp->mtu);
5639 	mps     = __le16_to_cpu(rsp->mps);
5640 	credits = __le16_to_cpu(rsp->credits);
5641 	result  = __le16_to_cpu(rsp->result);
5642 
5643 	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5644 					   dcid < L2CAP_CID_DYN_START ||
5645 					   dcid > L2CAP_CID_LE_DYN_END))
5646 		return -EPROTO;
5647 
5648 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5649 	       dcid, mtu, mps, credits, result);
5650 
5651 	mutex_lock(&conn->chan_lock);
5652 
5653 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5654 	if (!chan) {
5655 		err = -EBADSLT;
5656 		goto unlock;
5657 	}
5658 
5659 	err = 0;
5660 
5661 	l2cap_chan_lock(chan);
5662 
5663 	switch (result) {
5664 	case L2CAP_CR_LE_SUCCESS:
5665 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5666 			err = -EBADSLT;
5667 			break;
5668 		}
5669 
5670 		chan->ident = 0;
5671 		chan->dcid = dcid;
5672 		chan->omtu = mtu;
5673 		chan->remote_mps = mps;
5674 		chan->tx_credits = credits;
5675 		l2cap_chan_ready(chan);
5676 		break;
5677 
5678 	case L2CAP_CR_LE_AUTHENTICATION:
5679 	case L2CAP_CR_LE_ENCRYPTION:
5680 		/* If we already have MITM protection we can't do
5681 		 * anything.
5682 		 */
5683 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5684 			l2cap_chan_del(chan, ECONNREFUSED);
5685 			break;
5686 		}
5687 
5688 		sec_level = hcon->sec_level + 1;
5689 		if (chan->sec_level < sec_level)
5690 			chan->sec_level = sec_level;
5691 
5692 		/* We'll need to send a new Connect Request */
5693 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5694 
5695 		smp_conn_security(hcon, chan->sec_level);
5696 		break;
5697 
5698 	default:
5699 		l2cap_chan_del(chan, ECONNREFUSED);
5700 		break;
5701 	}
5702 
5703 	l2cap_chan_unlock(chan);
5704 
5705 unlock:
5706 	mutex_unlock(&conn->chan_lock);
5707 
5708 	return err;
5709 }
5710 
5711 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5712 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5713 				      u8 *data)
5714 {
5715 	int err = 0;
5716 
5717 	switch (cmd->code) {
5718 	case L2CAP_COMMAND_REJ:
5719 		l2cap_command_rej(conn, cmd, cmd_len, data);
5720 		break;
5721 
5722 	case L2CAP_CONN_REQ:
5723 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5724 		break;
5725 
5726 	case L2CAP_CONN_RSP:
5727 	case L2CAP_CREATE_CHAN_RSP:
5728 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5729 		break;
5730 
5731 	case L2CAP_CONF_REQ:
5732 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5733 		break;
5734 
5735 	case L2CAP_CONF_RSP:
5736 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5737 		break;
5738 
5739 	case L2CAP_DISCONN_REQ:
5740 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5741 		break;
5742 
5743 	case L2CAP_DISCONN_RSP:
5744 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5745 		break;
5746 
5747 	case L2CAP_ECHO_REQ:
5748 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5749 		break;
5750 
5751 	case L2CAP_ECHO_RSP:
5752 		break;
5753 
5754 	case L2CAP_INFO_REQ:
5755 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5756 		break;
5757 
5758 	case L2CAP_INFO_RSP:
5759 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5760 		break;
5761 
5762 	case L2CAP_CREATE_CHAN_REQ:
5763 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5764 		break;
5765 
5766 	case L2CAP_MOVE_CHAN_REQ:
5767 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5768 		break;
5769 
5770 	case L2CAP_MOVE_CHAN_RSP:
5771 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5772 		break;
5773 
5774 	case L2CAP_MOVE_CHAN_CFM:
5775 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5776 		break;
5777 
5778 	case L2CAP_MOVE_CHAN_CFM_RSP:
5779 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5780 		break;
5781 
5782 	default:
5783 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5784 		err = -EINVAL;
5785 		break;
5786 	}
5787 
5788 	return err;
5789 }
5790 
5791 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5792 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5793 				u8 *data)
5794 {
5795 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5796 	struct l2cap_le_conn_rsp rsp;
5797 	struct l2cap_chan *chan, *pchan;
5798 	u16 dcid, scid, credits, mtu, mps;
5799 	__le16 psm;
5800 	u8 result;
5801 
5802 	if (cmd_len != sizeof(*req))
5803 		return -EPROTO;
5804 
5805 	scid = __le16_to_cpu(req->scid);
5806 	mtu  = __le16_to_cpu(req->mtu);
5807 	mps  = __le16_to_cpu(req->mps);
5808 	psm  = req->psm;
5809 	dcid = 0;
5810 	credits = 0;
5811 
5812 	if (mtu < 23 || mps < 23)
5813 		return -EPROTO;
5814 
5815 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5816 	       scid, mtu, mps);
5817 
5818 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5819 	 * page 1059:
5820 	 *
5821 	 * Valid range: 0x0001-0x00ff
5822 	 *
5823 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5824 	 */
5825 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5826 		result = L2CAP_CR_LE_BAD_PSM;
5827 		chan = NULL;
5828 		goto response;
5829 	}
5830 
5831 	/* Check if we have socket listening on psm */
5832 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5833 					 &conn->hcon->dst, LE_LINK);
5834 	if (!pchan) {
5835 		result = L2CAP_CR_LE_BAD_PSM;
5836 		chan = NULL;
5837 		goto response;
5838 	}
5839 
5840 	mutex_lock(&conn->chan_lock);
5841 	l2cap_chan_lock(pchan);
5842 
5843 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5844 				     SMP_ALLOW_STK)) {
5845 		result = L2CAP_CR_LE_AUTHENTICATION;
5846 		chan = NULL;
5847 		goto response_unlock;
5848 	}
5849 
5850 	/* Check for valid dynamic CID range */
5851 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5852 		result = L2CAP_CR_LE_INVALID_SCID;
5853 		chan = NULL;
5854 		goto response_unlock;
5855 	}
5856 
5857 	/* Check if we already have channel with that dcid */
5858 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5859 		result = L2CAP_CR_LE_SCID_IN_USE;
5860 		chan = NULL;
5861 		goto response_unlock;
5862 	}
5863 
5864 	chan = pchan->ops->new_connection(pchan);
5865 	if (!chan) {
5866 		result = L2CAP_CR_LE_NO_MEM;
5867 		goto response_unlock;
5868 	}
5869 
5870 	bacpy(&chan->src, &conn->hcon->src);
5871 	bacpy(&chan->dst, &conn->hcon->dst);
5872 	chan->src_type = bdaddr_src_type(conn->hcon);
5873 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5874 	chan->psm  = psm;
5875 	chan->dcid = scid;
5876 	chan->omtu = mtu;
5877 	chan->remote_mps = mps;
5878 
5879 	__l2cap_chan_add(conn, chan);
5880 
5881 	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5882 
5883 	dcid = chan->scid;
5884 	credits = chan->rx_credits;
5885 
5886 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5887 
5888 	chan->ident = cmd->ident;
5889 
5890 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5891 		l2cap_state_change(chan, BT_CONNECT2);
5892 		/* The following result value is actually not defined
5893 		 * for LE CoC but we use it to let the function know
5894 		 * that it should bail out after doing its cleanup
5895 		 * instead of sending a response.
5896 		 */
5897 		result = L2CAP_CR_PEND;
5898 		chan->ops->defer(chan);
5899 	} else {
5900 		l2cap_chan_ready(chan);
5901 		result = L2CAP_CR_LE_SUCCESS;
5902 	}
5903 
5904 response_unlock:
5905 	l2cap_chan_unlock(pchan);
5906 	mutex_unlock(&conn->chan_lock);
5907 	l2cap_chan_put(pchan);
5908 
5909 	if (result == L2CAP_CR_PEND)
5910 		return 0;
5911 
5912 response:
5913 	if (chan) {
5914 		rsp.mtu = cpu_to_le16(chan->imtu);
5915 		rsp.mps = cpu_to_le16(chan->mps);
5916 	} else {
5917 		rsp.mtu = 0;
5918 		rsp.mps = 0;
5919 	}
5920 
5921 	rsp.dcid    = cpu_to_le16(dcid);
5922 	rsp.credits = cpu_to_le16(credits);
5923 	rsp.result  = cpu_to_le16(result);
5924 
5925 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5926 
5927 	return 0;
5928 }
5929 
5930 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5931 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5932 				   u8 *data)
5933 {
5934 	struct l2cap_le_credits *pkt;
5935 	struct l2cap_chan *chan;
5936 	u16 cid, credits, max_credits;
5937 
5938 	if (cmd_len != sizeof(*pkt))
5939 		return -EPROTO;
5940 
5941 	pkt = (struct l2cap_le_credits *) data;
5942 	cid	= __le16_to_cpu(pkt->cid);
5943 	credits	= __le16_to_cpu(pkt->credits);
5944 
5945 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5946 
5947 	chan = l2cap_get_chan_by_dcid(conn, cid);
5948 	if (!chan)
5949 		return -EBADSLT;
5950 
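	/* The peer may never push our tx_credits above
	 * LE_FLOWCTL_MAX_CREDITS (65535); a larger grant is treated as a
	 * protocol error and the channel is disconnected.
	 */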
5951 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5952 	if (credits > max_credits) {
5953 		BT_ERR("LE credits overflow");
5954 		l2cap_send_disconn_req(chan, ECONNRESET);
5955 
5956 		/* Return 0 so that we don't trigger an unnecessary
5957 		 * command reject packet.
5958 		 */
5959 		goto unlock;
5960 	}
5961 
5962 	chan->tx_credits += credits;
5963 
5964 	/* Resume sending */
5965 	l2cap_le_flowctl_send(chan);
5966 
5967 	if (chan->tx_credits)
5968 		chan->ops->resume(chan);
5969 
5970 unlock:
5971 	l2cap_chan_unlock(chan);
5972 	l2cap_chan_put(chan);
5973 
5974 	return 0;
5975 }
5976 
5977 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5978 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5979 				       u8 *data)
5980 {
5981 	struct l2cap_ecred_conn_req *req = (void *) data;
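	/* Response PDU: a fixed l2cap_ecred_conn_rsp header followed by one
	 * allocated DCID per SCID in the request (up to L2CAP_ECRED_MAX_CID).
	 */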
5982 	struct {
5983 		struct l2cap_ecred_conn_rsp rsp;
5984 		__le16 dcid[L2CAP_ECRED_MAX_CID];
5985 	} __packed pdu;
5986 	struct l2cap_chan *chan, *pchan;
5987 	u16 mtu, mps;
5988 	__le16 psm;
5989 	u8 result, len = 0;
5990 	int i, num_scid;
5991 	bool defer = false;
5992 
5993 	if (!enable_ecred)
5994 		return -EINVAL;
5995 
5996 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5997 		result = L2CAP_CR_LE_INVALID_PARAMS;
5998 		goto response;
5999 	}
6000 
6001 	cmd_len -= sizeof(*req);
6002 	num_scid = cmd_len / sizeof(u16);
6003 
6004 	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
6005 		result = L2CAP_CR_LE_INVALID_PARAMS;
6006 		goto response;
6007 	}
6008 
6009 	mtu  = __le16_to_cpu(req->mtu);
6010 	mps  = __le16_to_cpu(req->mps);
6011 
6012 	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
6013 		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
6014 		goto response;
6015 	}
6016 
6017 	psm  = req->psm;
6018 
6019 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
6020 	 * page 1059:
6021 	 *
6022 	 * Valid range: 0x0001-0x00ff
6023 	 *
6024 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
6025 	 */
6026 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
6027 		result = L2CAP_CR_LE_BAD_PSM;
6028 		goto response;
6029 	}
6030 
6031 	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
6032 
6033 	memset(&pdu, 0, sizeof(pdu));
6034 
6035 	/* Check if we have socket listening on psm */
6036 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
6037 					 &conn->hcon->dst, LE_LINK);
6038 	if (!pchan) {
6039 		result = L2CAP_CR_LE_BAD_PSM;
6040 		goto response;
6041 	}
6042 
6043 	mutex_lock(&conn->chan_lock);
6044 	l2cap_chan_lock(pchan);
6045 
6046 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
6047 				     SMP_ALLOW_STK)) {
6048 		result = L2CAP_CR_LE_AUTHENTICATION;
6049 		goto unlock;
6050 	}
6051 
6052 	result = L2CAP_CR_LE_SUCCESS;
6053 
6054 	for (i = 0; i < num_scid; i++) {
6055 		u16 scid = __le16_to_cpu(req->scid[i]);
6056 
6057 		BT_DBG("scid[%d] 0x%4.4x", i, scid);
6058 
6059 		pdu.dcid[i] = 0x0000;
6060 		len += sizeof(*pdu.dcid);
6061 
6062 		/* Check for valid dynamic CID range */
6063 		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
6064 			result = L2CAP_CR_LE_INVALID_SCID;
6065 			continue;
6066 		}
6067 
6068 		/* Check if we already have channel with that dcid */
6069 		if (__l2cap_get_chan_by_dcid(conn, scid)) {
6070 			result = L2CAP_CR_LE_SCID_IN_USE;
6071 			continue;
6072 		}
6073 
6074 		chan = pchan->ops->new_connection(pchan);
6075 		if (!chan) {
6076 			result = L2CAP_CR_LE_NO_MEM;
6077 			continue;
6078 		}
6079 
6080 		bacpy(&chan->src, &conn->hcon->src);
6081 		bacpy(&chan->dst, &conn->hcon->dst);
6082 		chan->src_type = bdaddr_src_type(conn->hcon);
6083 		chan->dst_type = bdaddr_dst_type(conn->hcon);
6084 		chan->psm  = psm;
6085 		chan->dcid = scid;
6086 		chan->omtu = mtu;
6087 		chan->remote_mps = mps;
6088 
6089 		__l2cap_chan_add(conn, chan);
6090 
6091 		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
6092 
6093 		/* Init response */
6094 		if (!pdu.rsp.credits) {
6095 			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6096 			pdu.rsp.mps = cpu_to_le16(chan->mps);
6097 			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6098 		}
6099 
6100 		pdu.dcid[i] = cpu_to_le16(chan->scid);
6101 
6102 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6103 
6104 		chan->ident = cmd->ident;
6105 
6106 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6107 			l2cap_state_change(chan, BT_CONNECT2);
6108 			defer = true;
6109 			chan->ops->defer(chan);
6110 		} else {
6111 			l2cap_chan_ready(chan);
6112 		}
6113 	}
6114 
6115 unlock:
6116 	l2cap_chan_unlock(pchan);
6117 	mutex_unlock(&conn->chan_lock);
6118 	l2cap_chan_put(pchan);
6119 
6120 response:
6121 	pdu.rsp.result = cpu_to_le16(result);
6122 
6123 	if (defer)
6124 		return 0;
6125 
6126 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6127 		       sizeof(pdu.rsp) + len, &pdu);
6128 
6129 	return 0;
6130 }
6131 
6132 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6133 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6134 				       u8 *data)
6135 {
6136 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6137 	struct hci_conn *hcon = conn->hcon;
6138 	u16 mtu, mps, credits, result;
6139 	struct l2cap_chan *chan, *tmp;
6140 	int err = 0, sec_level;
6141 	int i = 0;
6142 
6143 	if (cmd_len < sizeof(*rsp))
6144 		return -EPROTO;
6145 
6146 	mtu     = __le16_to_cpu(rsp->mtu);
6147 	mps     = __le16_to_cpu(rsp->mps);
6148 	credits = __le16_to_cpu(rsp->credits);
6149 	result  = __le16_to_cpu(rsp->result);
6150 
6151 	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6152 	       result);
6153 
6154 	mutex_lock(&conn->chan_lock);
6155 
6156 	cmd_len -= sizeof(*rsp);
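	/* cmd_len now covers only the list of returned DCIDs, which is
	 * consumed one u16 at a time in the loop below.
	 */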
6157 
6158 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6159 		u16 dcid;
6160 
6161 		if (chan->ident != cmd->ident ||
6162 		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6163 		    chan->state == BT_CONNECTED)
6164 			continue;
6165 
6166 		l2cap_chan_lock(chan);
6167 
6168 		/* Check that there is a dcid for each pending channel */
6169 		if (cmd_len < sizeof(dcid)) {
6170 			l2cap_chan_del(chan, ECONNREFUSED);
6171 			l2cap_chan_unlock(chan);
6172 			continue;
6173 		}
6174 
6175 		dcid = __le16_to_cpu(rsp->dcid[i++]);
6176 		cmd_len -= sizeof(u16);
6177 
6178 		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6179 
6180 		/* Check if dcid is already in use */
6181 		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6182 			/* If a device receives a
6183 			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6184 			 * already-assigned Destination CID, then both the
6185 			 * original channel and the new channel shall be
6186 			 * immediately discarded and not used.
6187 			 */
6188 			l2cap_chan_del(chan, ECONNREFUSED);
6189 			l2cap_chan_unlock(chan);
6190 			chan = __l2cap_get_chan_by_dcid(conn, dcid);
6191 			l2cap_chan_lock(chan);
6192 			l2cap_chan_del(chan, ECONNRESET);
6193 			l2cap_chan_unlock(chan);
6194 			continue;
6195 		}
6196 
6197 		switch (result) {
6198 		case L2CAP_CR_LE_AUTHENTICATION:
6199 		case L2CAP_CR_LE_ENCRYPTION:
6200 			/* If we already have MITM protection we can't do
6201 			 * anything.
6202 			 */
6203 			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6204 				l2cap_chan_del(chan, ECONNREFUSED);
6205 				break;
6206 			}
6207 
6208 			sec_level = hcon->sec_level + 1;
6209 			if (chan->sec_level < sec_level)
6210 				chan->sec_level = sec_level;
6211 
6212 			/* We'll need to send a new Connect Request */
6213 			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6214 
6215 			smp_conn_security(hcon, chan->sec_level);
6216 			break;
6217 
6218 		case L2CAP_CR_LE_BAD_PSM:
6219 			l2cap_chan_del(chan, ECONNREFUSED);
6220 			break;
6221 
6222 		default:
6223 			/* If dcid was not set it means the channel was refused */
6224 			if (!dcid) {
6225 				l2cap_chan_del(chan, ECONNREFUSED);
6226 				break;
6227 			}
6228 
6229 			chan->ident = 0;
6230 			chan->dcid = dcid;
6231 			chan->omtu = mtu;
6232 			chan->remote_mps = mps;
6233 			chan->tx_credits = credits;
6234 			l2cap_chan_ready(chan);
6235 			break;
6236 		}
6237 
6238 		l2cap_chan_unlock(chan);
6239 	}
6240 
6241 	mutex_unlock(&conn->chan_lock);
6242 
6243 	return err;
6244 }
6245 
6246 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6247 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6248 					 u8 *data)
6249 {
6250 	struct l2cap_ecred_reconf_req *req = (void *) data;
6251 	struct l2cap_ecred_reconf_rsp rsp;
6252 	u16 mtu, mps, result;
6253 	struct l2cap_chan *chan;
6254 	int i, num_scid;
6255 
6256 	if (!enable_ecred)
6257 		return -EINVAL;
6258 
6259 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6260 		result = L2CAP_CR_LE_INVALID_PARAMS;
6261 		goto respond;
6262 	}
6263 
6264 	mtu = __le16_to_cpu(req->mtu);
6265 	mps = __le16_to_cpu(req->mps);
6266 
6267 	BT_DBG("mtu %u mps %u", mtu, mps);
6268 
6269 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6270 		result = L2CAP_RECONF_INVALID_MTU;
6271 		goto respond;
6272 	}
6273 
6274 	if (mps < L2CAP_ECRED_MIN_MPS) {
6275 		result = L2CAP_RECONF_INVALID_MPS;
6276 		goto respond;
6277 	}
6278 
6279 	cmd_len -= sizeof(*req);
6280 	num_scid = cmd_len / sizeof(u16);
6281 	result = L2CAP_RECONF_SUCCESS;
6282 
6283 	for (i = 0; i < num_scid; i++) {
6284 		u16 scid;
6285 
6286 		scid = __le16_to_cpu(req->scid[i]);
6287 		if (!scid)
6288 			return -EPROTO;
6289 
6290 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6291 		if (!chan)
6292 			continue;
6293 
6294 		/* If the MTU value is decreased for any of the included
6295 		 * channels, then the receiver shall disconnect all
6296 		 * included channels.
6297 		 */
6298 		if (chan->omtu > mtu) {
6299 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6300 			       chan->omtu, mtu);
6301 			result = L2CAP_RECONF_INVALID_MTU;
6302 		}
6303 
6304 		chan->omtu = mtu;
6305 		chan->remote_mps = mps;
6306 	}
6307 
6308 respond:
6309 	rsp.result = cpu_to_le16(result);
6310 
6311 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6312 		       &rsp);
6313 
6314 	return 0;
6315 }
6316 
6317 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6318 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6319 					 u8 *data)
6320 {
6321 	struct l2cap_chan *chan, *tmp;
6322 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6323 	u16 result;
6324 
6325 	if (cmd_len < sizeof(*rsp))
6326 		return -EPROTO;
6327 
6328 	result = __le16_to_cpu(rsp->result);
6329 
6330 	BT_DBG("result 0x%4.4x", rsp->result);
6331 
6332 	if (!result)
6333 		return 0;
6334 
6335 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6336 		if (chan->ident != cmd->ident)
6337 			continue;
6338 
6339 		l2cap_chan_del(chan, ECONNRESET);
6340 	}
6341 
6342 	return 0;
6343 }
6344 
6345 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6346 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6347 				       u8 *data)
6348 {
6349 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6350 	struct l2cap_chan *chan;
6351 
6352 	if (cmd_len < sizeof(*rej))
6353 		return -EPROTO;
6354 
6355 	mutex_lock(&conn->chan_lock);
6356 
6357 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6358 	if (!chan)
6359 		goto done;
6360 
6361 	l2cap_chan_lock(chan);
6362 	l2cap_chan_del(chan, ECONNREFUSED);
6363 	l2cap_chan_unlock(chan);
6364 
6365 done:
6366 	mutex_unlock(&conn->chan_lock);
6367 	return 0;
6368 }
6369 
6370 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6371 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6372 				   u8 *data)
6373 {
6374 	int err = 0;
6375 
6376 	switch (cmd->code) {
6377 	case L2CAP_COMMAND_REJ:
6378 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
6379 		break;
6380 
6381 	case L2CAP_CONN_PARAM_UPDATE_REQ:
6382 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6383 		break;
6384 
6385 	case L2CAP_CONN_PARAM_UPDATE_RSP:
6386 		break;
6387 
6388 	case L2CAP_LE_CONN_RSP:
6389 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6390 		break;
6391 
6392 	case L2CAP_LE_CONN_REQ:
6393 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6394 		break;
6395 
6396 	case L2CAP_LE_CREDITS:
6397 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
6398 		break;
6399 
6400 	case L2CAP_ECRED_CONN_REQ:
6401 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6402 		break;
6403 
6404 	case L2CAP_ECRED_CONN_RSP:
6405 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6406 		break;
6407 
6408 	case L2CAP_ECRED_RECONF_REQ:
6409 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6410 		break;
6411 
6412 	case L2CAP_ECRED_RECONF_RSP:
6413 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6414 		break;
6415 
6416 	case L2CAP_DISCONN_REQ:
6417 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6418 		break;
6419 
6420 	case L2CAP_DISCONN_RSP:
6421 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6422 		break;
6423 
6424 	default:
6425 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
6426 		err = -EINVAL;
6427 		break;
6428 	}
6429 
6430 	return err;
6431 }
6432 
6433 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6434 					struct sk_buff *skb)
6435 {
6436 	struct hci_conn *hcon = conn->hcon;
6437 	struct l2cap_cmd_hdr *cmd;
6438 	u16 len;
6439 	int err;
6440 
6441 	if (hcon->type != LE_LINK)
6442 		goto drop;
6443 
6444 	if (skb->len < L2CAP_CMD_HDR_SIZE)
6445 		goto drop;
6446 
6447 	cmd = (void *) skb->data;
6448 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6449 
6450 	len = le16_to_cpu(cmd->len);
6451 
6452 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
6453 
6454 	if (len != skb->len || !cmd->ident) {
6455 		BT_DBG("corrupted command");
6456 		goto drop;
6457 	}
6458 
6459 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6460 	if (err) {
6461 		struct l2cap_cmd_rej_unk rej;
6462 
6463 		BT_ERR("Wrong link type (%d)", err);
6464 
6465 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6466 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6467 			       sizeof(rej), &rej);
6468 	}
6469 
6470 drop:
6471 	kfree_skb(skb);
6472 }
6473 
6474 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6475 				     struct sk_buff *skb)
6476 {
6477 	struct hci_conn *hcon = conn->hcon;
6478 	struct l2cap_cmd_hdr *cmd;
6479 	int err;
6480 
6481 	l2cap_raw_recv(conn, skb);
6482 
6483 	if (hcon->type != ACL_LINK)
6484 		goto drop;
6485 
6486 	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6487 		u16 len;
6488 
6489 		cmd = (void *) skb->data;
6490 		skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6491 
6492 		len = le16_to_cpu(cmd->len);
6493 
6494 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
6495 		       cmd->ident);
6496 
6497 		if (len > skb->len || !cmd->ident) {
6498 			BT_DBG("corrupted command");
6499 			break;
6500 		}
6501 
6502 		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6503 		if (err) {
6504 			struct l2cap_cmd_rej_unk rej;
6505 
6506 			BT_ERR("Wrong link type (%d)", err);
6507 
6508 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6509 			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6510 				       sizeof(rej), &rej);
6511 		}
6512 
6513 		skb_pull(skb, len);
6514 	}
6515 
6516 drop:
6517 	kfree_skb(skb);
6518 }
6519 
6520 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6521 {
6522 	u16 our_fcs, rcv_fcs;
6523 	int hdr_size;
6524 
6525 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6526 		hdr_size = L2CAP_EXT_HDR_SIZE;
6527 	else
6528 		hdr_size = L2CAP_ENH_HDR_SIZE;
6529 
6530 	if (chan->fcs == L2CAP_FCS_CRC16) {
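		/* The 2-byte FCS trails the PDU.  Trimming removes it from
		 * skb->len, but the bytes remain in the buffer, so the
		 * received FCS can still be read at skb->data + skb->len.
		 * The CRC covers the L2CAP header (which sits just before
		 * skb->data) plus the remaining payload.
		 */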
6531 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6532 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6533 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6534 
6535 		if (our_fcs != rcv_fcs)
6536 			return -EBADMSG;
6537 	}
6538 	return 0;
6539 }
6540 
6541 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6542 {
6543 	struct l2cap_ctrl control;
6544 
6545 	BT_DBG("chan %p", chan);
6546 
6547 	memset(&control, 0, sizeof(control));
6548 	control.sframe = 1;
6549 	control.final = 1;
6550 	control.reqseq = chan->buffer_seq;
6551 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6552 
6553 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6554 		control.super = L2CAP_SUPER_RNR;
6555 		l2cap_send_sframe(chan, &control);
6556 	}
6557 
6558 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6559 	    chan->unacked_frames > 0)
6560 		__set_retrans_timer(chan);
6561 
6562 	/* Send pending iframes */
6563 	l2cap_ertm_send(chan);
6564 
6565 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6566 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6567 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
6568 		 * send it now.
6569 		 */
6570 		control.super = L2CAP_SUPER_RR;
6571 		l2cap_send_sframe(chan, &control);
6572 	}
6573 }
6574 
6575 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6576 			    struct sk_buff **last_frag)
6577 {
6578 	/* skb->len reflects data in skb as well as all fragments
6579 	 * skb->data_len reflects only data in fragments
6580 	 */
6581 	if (!skb_has_frag_list(skb))
6582 		skb_shinfo(skb)->frag_list = new_frag;
6583 
6584 	new_frag->next = NULL;
6585 
6586 	(*last_frag)->next = new_frag;
6587 	*last_frag = new_frag;
6588 
6589 	skb->len += new_frag->len;
6590 	skb->data_len += new_frag->len;
6591 	skb->truesize += new_frag->truesize;
6592 }
6593 
6594 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6595 				struct l2cap_ctrl *control)
6596 {
6597 	int err = -EINVAL;
6598 
6599 	switch (control->sar) {
6600 	case L2CAP_SAR_UNSEGMENTED:
6601 		if (chan->sdu)
6602 			break;
6603 
6604 		err = chan->ops->recv(chan, skb);
6605 		break;
6606 
6607 	case L2CAP_SAR_START:
6608 		if (chan->sdu)
6609 			break;
6610 
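		/* The first fragment of a segmented SDU carries a 2-byte SDU
		 * length field; pull it off before buffering the payload.
		 */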
6611 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6612 			break;
6613 
6614 		chan->sdu_len = get_unaligned_le16(skb->data);
6615 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6616 
6617 		if (chan->sdu_len > chan->imtu) {
6618 			err = -EMSGSIZE;
6619 			break;
6620 		}
6621 
6622 		if (skb->len >= chan->sdu_len)
6623 			break;
6624 
6625 		chan->sdu = skb;
6626 		chan->sdu_last_frag = skb;
6627 
6628 		skb = NULL;
6629 		err = 0;
6630 		break;
6631 
6632 	case L2CAP_SAR_CONTINUE:
6633 		if (!chan->sdu)
6634 			break;
6635 
6636 		append_skb_frag(chan->sdu, skb,
6637 				&chan->sdu_last_frag);
6638 		skb = NULL;
6639 
6640 		if (chan->sdu->len >= chan->sdu_len)
6641 			break;
6642 
6643 		err = 0;
6644 		break;
6645 
6646 	case L2CAP_SAR_END:
6647 		if (!chan->sdu)
6648 			break;
6649 
6650 		append_skb_frag(chan->sdu, skb,
6651 				&chan->sdu_last_frag);
6652 		skb = NULL;
6653 
6654 		if (chan->sdu->len != chan->sdu_len)
6655 			break;
6656 
6657 		err = chan->ops->recv(chan, chan->sdu);
6658 
6659 		if (!err) {
6660 			/* Reassembly complete */
6661 			chan->sdu = NULL;
6662 			chan->sdu_last_frag = NULL;
6663 			chan->sdu_len = 0;
6664 		}
6665 		break;
6666 	}
6667 
6668 	if (err) {
6669 		kfree_skb(skb);
6670 		kfree_skb(chan->sdu);
6671 		chan->sdu = NULL;
6672 		chan->sdu_last_frag = NULL;
6673 		chan->sdu_len = 0;
6674 	}
6675 
6676 	return err;
6677 }
6678 
6679 static int l2cap_resegment(struct l2cap_chan *chan)
6680 {
6681 	/* Placeholder */
6682 	return 0;
6683 }
6684 
6685 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6686 {
6687 	u8 event;
6688 
6689 	if (chan->mode != L2CAP_MODE_ERTM)
6690 		return;
6691 
6692 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6693 	l2cap_tx(chan, NULL, NULL, event);
6694 }
6695 
6696 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6697 {
6698 	int err = 0;
6699 	/* Pass sequential frames to l2cap_reassemble_sdu()
6700 	 * until a gap is encountered.
6701 	 */
6702 
6703 	BT_DBG("chan %p", chan);
6704 
6705 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6706 		struct sk_buff *skb;
6707 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
6708 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
6709 
6710 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6711 
6712 		if (!skb)
6713 			break;
6714 
6715 		skb_unlink(skb, &chan->srej_q);
6716 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6717 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6718 		if (err)
6719 			break;
6720 	}
6721 
6722 	if (skb_queue_empty(&chan->srej_q)) {
6723 		chan->rx_state = L2CAP_RX_STATE_RECV;
6724 		l2cap_send_ack(chan);
6725 	}
6726 
6727 	return err;
6728 }
6729 
6730 static void l2cap_handle_srej(struct l2cap_chan *chan,
6731 			      struct l2cap_ctrl *control)
6732 {
6733 	struct sk_buff *skb;
6734 
6735 	BT_DBG("chan %p, control %p", chan, control);
6736 
6737 	if (control->reqseq == chan->next_tx_seq) {
6738 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6739 		l2cap_send_disconn_req(chan, ECONNRESET);
6740 		return;
6741 	}
6742 
6743 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6744 
6745 	if (skb == NULL) {
6746 		BT_DBG("Seq %d not available for retransmission",
6747 		       control->reqseq);
6748 		return;
6749 	}
6750 
6751 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6752 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6753 		l2cap_send_disconn_req(chan, ECONNRESET);
6754 		return;
6755 	}
6756 
6757 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6758 
6759 	if (control->poll) {
6760 		l2cap_pass_to_tx(chan, control);
6761 
6762 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
6763 		l2cap_retransmit(chan, control);
6764 		l2cap_ertm_send(chan);
6765 
6766 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6767 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6768 			chan->srej_save_reqseq = control->reqseq;
6769 		}
6770 	} else {
6771 		l2cap_pass_to_tx_fbit(chan, control);
6772 
6773 		if (control->final) {
6774 			if (chan->srej_save_reqseq != control->reqseq ||
6775 			    !test_and_clear_bit(CONN_SREJ_ACT,
6776 						&chan->conn_state))
6777 				l2cap_retransmit(chan, control);
6778 		} else {
6779 			l2cap_retransmit(chan, control);
6780 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6781 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6782 				chan->srej_save_reqseq = control->reqseq;
6783 			}
6784 		}
6785 	}
6786 }
6787 
6788 static void l2cap_handle_rej(struct l2cap_chan *chan,
6789 			     struct l2cap_ctrl *control)
6790 {
6791 	struct sk_buff *skb;
6792 
6793 	BT_DBG("chan %p, control %p", chan, control);
6794 
6795 	if (control->reqseq == chan->next_tx_seq) {
6796 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6797 		l2cap_send_disconn_req(chan, ECONNRESET);
6798 		return;
6799 	}
6800 
6801 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6802 
6803 	if (chan->max_tx && skb &&
6804 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6805 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6806 		l2cap_send_disconn_req(chan, ECONNRESET);
6807 		return;
6808 	}
6809 
6810 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6811 
6812 	l2cap_pass_to_tx(chan, control);
6813 
6814 	if (control->final) {
6815 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6816 			l2cap_retransmit_all(chan, control);
6817 	} else {
6818 		l2cap_retransmit_all(chan, control);
6819 		l2cap_ertm_send(chan);
6820 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6821 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6822 	}
6823 }
6824 
6825 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6826 {
6827 	BT_DBG("chan %p, txseq %d", chan, txseq);
6828 
6829 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6830 	       chan->expected_tx_seq);
6831 
6832 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6833 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6834 		    chan->tx_win) {
6835 			/* See notes below regarding "double poll" and
6836 			 * invalid packets.
6837 			 */
6838 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6839 				BT_DBG("Invalid/Ignore - after SREJ");
6840 				return L2CAP_TXSEQ_INVALID_IGNORE;
6841 			} else {
6842 				BT_DBG("Invalid - in window after SREJ sent");
6843 				return L2CAP_TXSEQ_INVALID;
6844 			}
6845 		}
6846 
6847 		if (chan->srej_list.head == txseq) {
6848 			BT_DBG("Expected SREJ");
6849 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6850 		}
6851 
6852 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6853 			BT_DBG("Duplicate SREJ - txseq already stored");
6854 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6855 		}
6856 
6857 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6858 			BT_DBG("Unexpected SREJ - not requested");
6859 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6860 		}
6861 	}
6862 
6863 	if (chan->expected_tx_seq == txseq) {
6864 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6865 		    chan->tx_win) {
6866 			BT_DBG("Invalid - txseq outside tx window");
6867 			return L2CAP_TXSEQ_INVALID;
6868 		} else {
6869 			BT_DBG("Expected");
6870 			return L2CAP_TXSEQ_EXPECTED;
6871 		}
6872 	}
6873 
6874 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6875 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6876 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6877 		return L2CAP_TXSEQ_DUPLICATE;
6878 	}
6879 
6880 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6881 		/* A source of invalid packets is a "double poll" condition,
6882 		 * where delays cause us to send multiple poll packets.  If
6883 		 * the remote stack receives and processes both polls,
6884 		 * sequence numbers can wrap around in such a way that a
6885 		 * resent frame has a sequence number that looks like new data
6886 		 * with a sequence gap.  This would trigger an erroneous SREJ
6887 		 * request.
6888 		 *
6889 		 * Fortunately, this is impossible with a tx window that's
6890 		 * less than half of the maximum sequence number, which allows
6891 		 * invalid frames to be safely ignored.
6892 		 *
6893 		 * With tx window sizes greater than half of the tx window
6894 		 * maximum, the frame is invalid and cannot be ignored.  This
6895 		 * causes a disconnect.
6896 		 */
6897 
6898 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6899 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6900 			return L2CAP_TXSEQ_INVALID_IGNORE;
6901 		} else {
6902 			BT_DBG("Invalid - txseq outside tx window");
6903 			return L2CAP_TXSEQ_INVALID;
6904 		}
6905 	} else {
6906 		BT_DBG("Unexpected - txseq indicates missing frames");
6907 		return L2CAP_TXSEQ_UNEXPECTED;
6908 	}
6909 }
6910 
6911 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6912 			       struct l2cap_ctrl *control,
6913 			       struct sk_buff *skb, u8 event)
6914 {
6915 	struct l2cap_ctrl local_control;
6916 	int err = 0;
6917 	bool skb_in_use = false;
6918 
6919 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6920 	       event);
6921 
6922 	switch (event) {
6923 	case L2CAP_EV_RECV_IFRAME:
6924 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6925 		case L2CAP_TXSEQ_EXPECTED:
6926 			l2cap_pass_to_tx(chan, control);
6927 
6928 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6929 				BT_DBG("Busy, discarding expected seq %d",
6930 				       control->txseq);
6931 				break;
6932 			}
6933 
6934 			chan->expected_tx_seq = __next_seq(chan,
6935 							   control->txseq);
6936 
6937 			chan->buffer_seq = chan->expected_tx_seq;
6938 			skb_in_use = true;
6939 
6940 			/* l2cap_reassemble_sdu may free skb, hence invalidate
6941 			 * control, so make a copy in advance to use it after
6942 			 * l2cap_reassemble_sdu returns and to avoid the race
6943 			 * condition, for example:
6944 			 *
6945 			 * The current thread calls:
6946 			 *   l2cap_reassemble_sdu
6947 			 *     chan->ops->recv == l2cap_sock_recv_cb
6948 			 *       __sock_queue_rcv_skb
6949 			 * Another thread calls:
6950 			 *   bt_sock_recvmsg
6951 			 *     skb_recv_datagram
6952 			 *     skb_free_datagram
6953 			 * Then the current thread tries to access control, but
6954 			 * it was freed by skb_free_datagram.
6955 			 */
6956 			local_control = *control;
6957 			err = l2cap_reassemble_sdu(chan, skb, control);
6958 			if (err)
6959 				break;
6960 
6961 			if (local_control.final) {
6962 				if (!test_and_clear_bit(CONN_REJ_ACT,
6963 							&chan->conn_state)) {
6964 					local_control.final = 0;
6965 					l2cap_retransmit_all(chan, &local_control);
6966 					l2cap_ertm_send(chan);
6967 				}
6968 			}
6969 
6970 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6971 				l2cap_send_ack(chan);
6972 			break;
6973 		case L2CAP_TXSEQ_UNEXPECTED:
6974 			l2cap_pass_to_tx(chan, control);
6975 
6976 			/* Can't issue SREJ frames in the local busy state.
6977 			 * Drop this frame, it will be seen as missing
6978 			 * when local busy is exited.
6979 			 */
6980 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6981 				BT_DBG("Busy, discarding unexpected seq %d",
6982 				       control->txseq);
6983 				break;
6984 			}
6985 
6986 			/* There was a gap in the sequence, so an SREJ
6987 			 * must be sent for each missing frame.  The
6988 			 * current frame is stored for later use.
6989 			 */
6990 			skb_queue_tail(&chan->srej_q, skb);
6991 			skb_in_use = true;
6992 			BT_DBG("Queued %p (queue len %d)", skb,
6993 			       skb_queue_len(&chan->srej_q));
6994 
6995 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6996 			l2cap_seq_list_clear(&chan->srej_list);
6997 			l2cap_send_srej(chan, control->txseq);
6998 
6999 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
7000 			break;
7001 		case L2CAP_TXSEQ_DUPLICATE:
7002 			l2cap_pass_to_tx(chan, control);
7003 			break;
7004 		case L2CAP_TXSEQ_INVALID_IGNORE:
7005 			break;
7006 		case L2CAP_TXSEQ_INVALID:
7007 		default:
7008 			l2cap_send_disconn_req(chan, ECONNRESET);
7009 			break;
7010 		}
7011 		break;
7012 	case L2CAP_EV_RECV_RR:
7013 		l2cap_pass_to_tx(chan, control);
7014 		if (control->final) {
7015 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7016 
7017 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
7018 			    !__chan_is_moving(chan)) {
7019 				control->final = 0;
7020 				l2cap_retransmit_all(chan, control);
7021 			}
7022 
7023 			l2cap_ertm_send(chan);
7024 		} else if (control->poll) {
7025 			l2cap_send_i_or_rr_or_rnr(chan);
7026 		} else {
7027 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7028 					       &chan->conn_state) &&
7029 			    chan->unacked_frames)
7030 				__set_retrans_timer(chan);
7031 
7032 			l2cap_ertm_send(chan);
7033 		}
7034 		break;
7035 	case L2CAP_EV_RECV_RNR:
7036 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7037 		l2cap_pass_to_tx(chan, control);
7038 		if (control && control->poll) {
7039 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7040 			l2cap_send_rr_or_rnr(chan, 0);
7041 		}
7042 		__clear_retrans_timer(chan);
7043 		l2cap_seq_list_clear(&chan->retrans_list);
7044 		break;
7045 	case L2CAP_EV_RECV_REJ:
7046 		l2cap_handle_rej(chan, control);
7047 		break;
7048 	case L2CAP_EV_RECV_SREJ:
7049 		l2cap_handle_srej(chan, control);
7050 		break;
7051 	default:
7052 		break;
7053 	}
7054 
7055 	if (skb && !skb_in_use) {
7056 		BT_DBG("Freeing %p", skb);
7057 		kfree_skb(skb);
7058 	}
7059 
7060 	return err;
7061 }
7062 
7063 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
7064 				    struct l2cap_ctrl *control,
7065 				    struct sk_buff *skb, u8 event)
7066 {
7067 	int err = 0;
7068 	u16 txseq = control->txseq;
7069 	bool skb_in_use = false;
7070 
7071 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7072 	       event);
7073 
7074 	switch (event) {
7075 	case L2CAP_EV_RECV_IFRAME:
7076 		switch (l2cap_classify_txseq(chan, txseq)) {
7077 		case L2CAP_TXSEQ_EXPECTED:
7078 			/* Keep frame for reassembly later */
7079 			l2cap_pass_to_tx(chan, control);
7080 			skb_queue_tail(&chan->srej_q, skb);
7081 			skb_in_use = true;
7082 			BT_DBG("Queued %p (queue len %d)", skb,
7083 			       skb_queue_len(&chan->srej_q));
7084 
7085 			chan->expected_tx_seq = __next_seq(chan, txseq);
7086 			break;
7087 		case L2CAP_TXSEQ_EXPECTED_SREJ:
7088 			l2cap_seq_list_pop(&chan->srej_list);
7089 
7090 			l2cap_pass_to_tx(chan, control);
7091 			skb_queue_tail(&chan->srej_q, skb);
7092 			skb_in_use = true;
7093 			BT_DBG("Queued %p (queue len %d)", skb,
7094 			       skb_queue_len(&chan->srej_q));
7095 
7096 			err = l2cap_rx_queued_iframes(chan);
7097 			if (err)
7098 				break;
7099 
7100 			break;
7101 		case L2CAP_TXSEQ_UNEXPECTED:
7102 			/* Got a frame that can't be reassembled yet.
7103 			 * Save it for later, and send SREJs to cover
7104 			 * the missing frames.
7105 			 */
7106 			skb_queue_tail(&chan->srej_q, skb);
7107 			skb_in_use = true;
7108 			BT_DBG("Queued %p (queue len %d)", skb,
7109 			       skb_queue_len(&chan->srej_q));
7110 
7111 			l2cap_pass_to_tx(chan, control);
7112 			l2cap_send_srej(chan, control->txseq);
7113 			break;
7114 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7115 			/* This frame was requested with an SREJ, but
7116 			 * some expected retransmitted frames are
7117 			 * missing.  Request retransmission of missing
7118 			 * SREJ'd frames.
7119 			 */
7120 			skb_queue_tail(&chan->srej_q, skb);
7121 			skb_in_use = true;
7122 			BT_DBG("Queued %p (queue len %d)", skb,
7123 			       skb_queue_len(&chan->srej_q));
7124 
7125 			l2cap_pass_to_tx(chan, control);
7126 			l2cap_send_srej_list(chan, control->txseq);
7127 			break;
7128 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
7129 			/* We've already queued this frame.  Drop this copy. */
7130 			l2cap_pass_to_tx(chan, control);
7131 			break;
7132 		case L2CAP_TXSEQ_DUPLICATE:
7133 			/* Expecting a later sequence number, so this frame
7134 			 * was already received.  Ignore it completely.
7135 			 */
7136 			break;
7137 		case L2CAP_TXSEQ_INVALID_IGNORE:
7138 			break;
7139 		case L2CAP_TXSEQ_INVALID:
7140 		default:
7141 			l2cap_send_disconn_req(chan, ECONNRESET);
7142 			break;
7143 		}
7144 		break;
7145 	case L2CAP_EV_RECV_RR:
7146 		l2cap_pass_to_tx(chan, control);
7147 		if (control->final) {
7148 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7149 
7150 			if (!test_and_clear_bit(CONN_REJ_ACT,
7151 						&chan->conn_state)) {
7152 				control->final = 0;
7153 				l2cap_retransmit_all(chan, control);
7154 			}
7155 
7156 			l2cap_ertm_send(chan);
7157 		} else if (control->poll) {
7158 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7159 					       &chan->conn_state) &&
7160 			    chan->unacked_frames) {
7161 				__set_retrans_timer(chan);
7162 			}
7163 
7164 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7165 			l2cap_send_srej_tail(chan);
7166 		} else {
7167 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7168 					       &chan->conn_state) &&
7169 			    chan->unacked_frames)
7170 				__set_retrans_timer(chan);
7171 
7172 			l2cap_send_ack(chan);
7173 		}
7174 		break;
7175 	case L2CAP_EV_RECV_RNR:
7176 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7177 		l2cap_pass_to_tx(chan, control);
7178 		if (control->poll) {
7179 			l2cap_send_srej_tail(chan);
7180 		} else {
7181 			struct l2cap_ctrl rr_control;
7182 			memset(&rr_control, 0, sizeof(rr_control));
7183 			rr_control.sframe = 1;
7184 			rr_control.super = L2CAP_SUPER_RR;
7185 			rr_control.reqseq = chan->buffer_seq;
7186 			l2cap_send_sframe(chan, &rr_control);
7187 		}
7188 
7189 		break;
7190 	case L2CAP_EV_RECV_REJ:
7191 		l2cap_handle_rej(chan, control);
7192 		break;
7193 	case L2CAP_EV_RECV_SREJ:
7194 		l2cap_handle_srej(chan, control);
7195 		break;
7196 	}
7197 
7198 	if (skb && !skb_in_use) {
7199 		BT_DBG("Freeing %p", skb);
7200 		kfree_skb(skb);
7201 	}
7202 
7203 	return err;
7204 }
7205 
7206 static int l2cap_finish_move(struct l2cap_chan *chan)
7207 {
7208 	BT_DBG("chan %p", chan);
7209 
7210 	chan->rx_state = L2CAP_RX_STATE_RECV;
7211 
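	/* Adopt the MTU of the controller now carrying the channel: the AMP
	 * block MTU if an HS link is in place, otherwise the BR/EDR ACL MTU.
	 */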
7212 	if (chan->hs_hcon)
7213 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7214 	else
7215 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7216 
7217 	return l2cap_resegment(chan);
7218 }
7219 
7220 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7221 				 struct l2cap_ctrl *control,
7222 				 struct sk_buff *skb, u8 event)
7223 {
7224 	int err;
7225 
7226 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7227 	       event);
7228 
7229 	if (!control->poll)
7230 		return -EPROTO;
7231 
7232 	l2cap_process_reqseq(chan, control->reqseq);
7233 
7234 	if (!skb_queue_empty(&chan->tx_q))
7235 		chan->tx_send_head = skb_peek(&chan->tx_q);
7236 	else
7237 		chan->tx_send_head = NULL;
7238 
7239 	/* Rewind next_tx_seq to the point expected
7240 	 * by the receiver.
7241 	 */
7242 	chan->next_tx_seq = control->reqseq;
7243 	chan->unacked_frames = 0;
7244 
7245 	err = l2cap_finish_move(chan);
7246 	if (err)
7247 		return err;
7248 
7249 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
7250 	l2cap_send_i_or_rr_or_rnr(chan);
7251 
7252 	if (event == L2CAP_EV_RECV_IFRAME)
7253 		return -EPROTO;
7254 
7255 	return l2cap_rx_state_recv(chan, control, NULL, event);
7256 }
7257 
7258 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7259 				 struct l2cap_ctrl *control,
7260 				 struct sk_buff *skb, u8 event)
7261 {
7262 	int err;
7263 
7264 	if (!control->final)
7265 		return -EPROTO;
7266 
7267 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7268 
7269 	chan->rx_state = L2CAP_RX_STATE_RECV;
7270 	l2cap_process_reqseq(chan, control->reqseq);
7271 
7272 	if (!skb_queue_empty(&chan->tx_q))
7273 		chan->tx_send_head = skb_peek(&chan->tx_q);
7274 	else
7275 		chan->tx_send_head = NULL;
7276 
7277 	/* Rewind next_tx_seq to the point expected
7278 	 * by the receiver.
7279 	 */
7280 	chan->next_tx_seq = control->reqseq;
7281 	chan->unacked_frames = 0;
7282 
7283 	if (chan->hs_hcon)
7284 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7285 	else
7286 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7287 
7288 	err = l2cap_resegment(chan);
7289 
7290 	if (!err)
7291 		err = l2cap_rx_state_recv(chan, control, skb, event);
7292 
7293 	return err;
7294 }
7295 
7296 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7297 {
7298 	/* Make sure reqseq is for a packet that has been sent but not acked */
7299 	u16 unacked;
7300 
7301 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
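	/* Offsets are taken modulo the sequence space, so the test below holds
	 * exactly when reqseq lies between expected_ack_seq and next_tx_seq.
	 */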
7302 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7303 }
7304 
7305 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7306 		    struct sk_buff *skb, u8 event)
7307 {
7308 	int err = 0;
7309 
7310 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7311 	       control, skb, event, chan->rx_state);
7312 
7313 	if (__valid_reqseq(chan, control->reqseq)) {
7314 		switch (chan->rx_state) {
7315 		case L2CAP_RX_STATE_RECV:
7316 			err = l2cap_rx_state_recv(chan, control, skb, event);
7317 			break;
7318 		case L2CAP_RX_STATE_SREJ_SENT:
7319 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7320 						       event);
7321 			break;
7322 		case L2CAP_RX_STATE_WAIT_P:
7323 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7324 			break;
7325 		case L2CAP_RX_STATE_WAIT_F:
7326 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7327 			break;
7328 		default:
7329 			/* shut it down */
7330 			break;
7331 		}
7332 	} else {
7333 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7334 		       control->reqseq, chan->next_tx_seq,
7335 		       chan->expected_ack_seq);
7336 		l2cap_send_disconn_req(chan, ECONNRESET);
7337 	}
7338 
7339 	return err;
7340 }
7341 
7342 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7343 			   struct sk_buff *skb)
7344 {
7345 	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
7346 	 * the txseq field in advance to use it after l2cap_reassemble_sdu
7347 	 * returns and to avoid the race condition, for example:
7348 	 *
7349 	 * The current thread calls:
7350 	 *   l2cap_reassemble_sdu
7351 	 *     chan->ops->recv == l2cap_sock_recv_cb
7352 	 *       __sock_queue_rcv_skb
7353 	 * Another thread calls:
7354 	 *   bt_sock_recvmsg
7355 	 *     skb_recv_datagram
7356 	 *     skb_free_datagram
7357 	 * Then the current thread tries to access control, but it was freed by
7358 	 * skb_free_datagram.
7359 	 */
7360 	u16 txseq = control->txseq;
7361 
7362 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7363 	       chan->rx_state);
7364 
7365 	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
7366 		l2cap_pass_to_tx(chan, control);
7367 
7368 		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
7369 		       __next_seq(chan, chan->buffer_seq));
7370 
7371 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7372 
7373 		l2cap_reassemble_sdu(chan, skb, control);
7374 	} else {
7375 		if (chan->sdu) {
7376 			kfree_skb(chan->sdu);
7377 			chan->sdu = NULL;
7378 		}
7379 		chan->sdu_last_frag = NULL;
7380 		chan->sdu_len = 0;
7381 
7382 		if (skb) {
7383 			BT_DBG("Freeing %p", skb);
7384 			kfree_skb(skb);
7385 		}
7386 	}
7387 
7388 	chan->last_acked_seq = txseq;
7389 	chan->expected_tx_seq = __next_seq(chan, txseq);
7390 
7391 	return 0;
7392 }
7393 
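/* Common per-PDU processing for ERTM and Streaming channels: unpack the
 * control field, verify the FCS and the payload size against the MPS,
 * then route I-frames and S-frames into the receive state machines above.
 */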
7394 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7395 {
7396 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7397 	u16 len;
7398 	u8 event;
7399 
7400 	__unpack_control(chan, skb);
7401 
7402 	len = skb->len;
7403 
7404 	/*
7405 	 * We can just drop the corrupted I-frame here.
7406 	 * Receiver will miss it and start proper recovery
7407 	 * procedures and ask for retransmission.
7408 	 */
7409 	if (l2cap_check_fcs(chan, skb))
7410 		goto drop;
7411 
7412 	if (!control->sframe && control->sar == L2CAP_SAR_START)
7413 		len -= L2CAP_SDULEN_SIZE;
7414 
7415 	if (chan->fcs == L2CAP_FCS_CRC16)
7416 		len -= L2CAP_FCS_SIZE;
7417 
7418 	if (len > chan->mps) {
7419 		l2cap_send_disconn_req(chan, ECONNRESET);
7420 		goto drop;
7421 	}
7422 
7423 	if (chan->ops->filter) {
7424 		if (chan->ops->filter(chan, skb))
7425 			goto drop;
7426 	}
7427 
7428 	if (!control->sframe) {
7429 		int err;
7430 
7431 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7432 		       control->sar, control->reqseq, control->final,
7433 		       control->txseq);
7434 
7435 		/* Validate F-bit - F=0 always valid, F=1 only
7436 		 * valid in TX WAIT_F
7437 		 */
7438 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7439 			goto drop;
7440 
7441 		if (chan->mode != L2CAP_MODE_STREAMING) {
7442 			event = L2CAP_EV_RECV_IFRAME;
7443 			err = l2cap_rx(chan, control, skb, event);
7444 		} else {
7445 			err = l2cap_stream_rx(chan, control, skb);
7446 		}
7447 
7448 		if (err)
7449 			l2cap_send_disconn_req(chan, ECONNRESET);
7450 	} else {
7451 		const u8 rx_func_to_event[4] = {
7452 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7453 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7454 		};
7455 
7456 		/* Only I-frames are expected in streaming mode */
7457 		if (chan->mode == L2CAP_MODE_STREAMING)
7458 			goto drop;
7459 
7460 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7461 		       control->reqseq, control->final, control->poll,
7462 		       control->super);
7463 
7464 		if (len != 0) {
7465 			BT_ERR("Trailing bytes: %d in sframe", len);
7466 			l2cap_send_disconn_req(chan, ECONNRESET);
7467 			goto drop;
7468 		}
7469 
7470 		/* Validate F and P bits */
7471 		if (control->final && (control->poll ||
7472 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7473 			goto drop;
7474 
7475 		event = rx_func_to_event[control->super];
7476 		if (l2cap_rx(chan, control, skb, event))
7477 			l2cap_send_disconn_req(chan, ECONNRESET);
7478 	}
7479 
7480 	return 0;
7481 
7482 drop:
7483 	kfree_skb(skb);
7484 	return 0;
7485 }
7486 
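/* For LE and enhanced credit based channels, top the remote sender back
 * up so that it always holds roughly imtu/mps + 1 credits; nothing is
 * sent if rx_credits is already at or above that level.
 */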
7487 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7488 {
7489 	struct l2cap_conn *conn = chan->conn;
7490 	struct l2cap_le_credits pkt;
7491 	u16 return_credits;
7492 
7493 	return_credits = (chan->imtu / chan->mps) + 1;
7494 
7495 	if (chan->rx_credits >= return_credits)
7496 		return;
7497 
7498 	return_credits -= chan->rx_credits;
7499 
7500 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7501 
7502 	chan->rx_credits += return_credits;
7503 
7504 	pkt.cid     = cpu_to_le16(chan->scid);
7505 	pkt.credits = cpu_to_le16(return_credits);
7506 
7507 	chan->ident = l2cap_get_ident(conn);
7508 
7509 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7510 }
7511 
7512 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7513 {
7514 	int err;
7515 
7516 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7517 
7518 	/* Wait for recv to confirm reception before updating the credits */
7519 	err = chan->ops->recv(chan, skb);
7520 
7521 	/* Update credits whenever an SDU is received */
7522 	l2cap_chan_le_send_credits(chan);
7523 
7524 	return err;
7525 }
7526 
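/* Receive one PDU on an LE or enhanced credit based channel: account for
 * the credit it consumed, then either start a new SDU (the first PDU
 * carries the 2-byte SDU length) or append the fragment until sdu_len
 * bytes have been collected and the SDU can be delivered.
 */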
7527 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7528 {
7529 	int err;
7530 
7531 	if (!chan->rx_credits) {
7532 		BT_ERR("No credits to receive LE L2CAP data");
7533 		l2cap_send_disconn_req(chan, ECONNRESET);
7534 		return -ENOBUFS;
7535 	}
7536 
7537 	if (chan->imtu < skb->len) {
7538 		BT_ERR("Too big LE L2CAP PDU");
7539 		return -ENOBUFS;
7540 	}
7541 
7542 	chan->rx_credits--;
7543 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7544 
7545 	/* Update if the remote has run out of credits; this should only happen
7546 	 * if the remote is not using the entire MPS.
7547 	 */
7548 	if (!chan->rx_credits)
7549 		l2cap_chan_le_send_credits(chan);
7550 
7551 	err = 0;
7552 
7553 	if (!chan->sdu) {
7554 		u16 sdu_len;
7555 
7556 		sdu_len = get_unaligned_le16(skb->data);
7557 		skb_pull(skb, L2CAP_SDULEN_SIZE);
7558 
7559 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7560 		       sdu_len, skb->len, chan->imtu);
7561 
7562 		if (sdu_len > chan->imtu) {
7563 			BT_ERR("Too big LE L2CAP SDU length received");
7564 			err = -EMSGSIZE;
7565 			goto failed;
7566 		}
7567 
7568 		if (skb->len > sdu_len) {
7569 			BT_ERR("Too much LE L2CAP data received");
7570 			err = -EINVAL;
7571 			goto failed;
7572 		}
7573 
7574 		if (skb->len == sdu_len)
7575 			return l2cap_ecred_recv(chan, skb);
7576 
7577 		chan->sdu = skb;
7578 		chan->sdu_len = sdu_len;
7579 		chan->sdu_last_frag = skb;
7580 
7581 		/* Detect if remote is not able to use the selected MPS */
7582 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7583 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7584 
7585 			/* Adjust the number of credits */
7586 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7587 			chan->mps = mps_len;
7588 			l2cap_chan_le_send_credits(chan);
7589 		}
7590 
7591 		return 0;
7592 	}
7593 
7594 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7595 	       chan->sdu->len, skb->len, chan->sdu_len);
7596 
7597 	if (chan->sdu->len + skb->len > chan->sdu_len) {
7598 		BT_ERR("Too much LE L2CAP data received");
7599 		err = -EINVAL;
7600 		goto failed;
7601 	}
7602 
7603 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7604 	skb = NULL;
7605 
7606 	if (chan->sdu->len == chan->sdu_len) {
7607 		err = l2cap_ecred_recv(chan, chan->sdu);
7608 		if (!err) {
7609 			chan->sdu = NULL;
7610 			chan->sdu_last_frag = NULL;
7611 			chan->sdu_len = 0;
7612 		}
7613 	}
7614 
7615 failed:
7616 	if (err) {
7617 		kfree_skb(skb);
7618 		kfree_skb(chan->sdu);
7619 		chan->sdu = NULL;
7620 		chan->sdu_last_frag = NULL;
7621 		chan->sdu_len = 0;
7622 	}
7623 
7624 	/* We can't return an error here since we took care of the skb
7625 	 * freeing internally. An error return would cause the caller to
7626 	 * do a double-free of the skb.
7627 	 */
7628 	return 0;
7629 }
7630 
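/* Deliver a PDU received on a dynamic or fixed CID to the owning
 * channel, dispatching on the channel mode; data for unknown CIDs
 * (other than the A2MP CID) is dropped.
 */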
7631 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7632 			       struct sk_buff *skb)
7633 {
7634 	struct l2cap_chan *chan;
7635 
7636 	chan = l2cap_get_chan_by_scid(conn, cid);
7637 	if (!chan) {
7638 		if (cid == L2CAP_CID_A2MP) {
7639 			chan = a2mp_channel_create(conn, skb);
7640 			if (!chan) {
7641 				kfree_skb(skb);
7642 				return;
7643 			}
7644 
7645 			l2cap_chan_hold(chan);
7646 			l2cap_chan_lock(chan);
7647 		} else {
7648 			BT_DBG("unknown cid 0x%4.4x", cid);
7649 			/* Drop packet and return */
7650 			kfree_skb(skb);
7651 			return;
7652 		}
7653 	}
7654 
7655 	BT_DBG("chan %p, len %d", chan, skb->len);
7656 
7657 	/* If we receive data on a fixed channel before the info req/rsp
7658 	 * procedure is done simply assume that the channel is supported
7659 	 * and mark it as ready.
7660 	 */
7661 	if (chan->chan_type == L2CAP_CHAN_FIXED)
7662 		l2cap_chan_ready(chan);
7663 
7664 	if (chan->state != BT_CONNECTED)
7665 		goto drop;
7666 
7667 	switch (chan->mode) {
7668 	case L2CAP_MODE_LE_FLOWCTL:
7669 	case L2CAP_MODE_EXT_FLOWCTL:
7670 		if (l2cap_ecred_data_rcv(chan, skb) < 0)
7671 			goto drop;
7672 
7673 		goto done;
7674 
7675 	case L2CAP_MODE_BASIC:
7676 		/* If the socket recv buffer overflows we drop data here,
7677 		 * which is *bad* because L2CAP has to be reliable.
7678 		 * But we don't have any other choice; L2CAP doesn't
7679 		 * provide a flow control mechanism. */
7680 
7681 		if (chan->imtu < skb->len) {
7682 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
7683 			goto drop;
7684 		}
7685 
7686 		if (!chan->ops->recv(chan, skb))
7687 			goto done;
7688 		break;
7689 
7690 	case L2CAP_MODE_ERTM:
7691 	case L2CAP_MODE_STREAMING:
7692 		l2cap_data_rcv(chan, skb);
7693 		goto done;
7694 
7695 	default:
7696 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7697 		break;
7698 	}
7699 
7700 drop:
7701 	kfree_skb(skb);
7702 
7703 done:
7704 	l2cap_chan_unlock(chan);
7705 	l2cap_chan_put(chan);
7706 }
7707 
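/* Connectionless (CID 0x0002) data: find a BR/EDR channel bound to the
 * PSM, record the remote address and PSM for msg_name and pass the skb
 * up to the channel's recv callback.
 */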
7708 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7709 				  struct sk_buff *skb)
7710 {
7711 	struct hci_conn *hcon = conn->hcon;
7712 	struct l2cap_chan *chan;
7713 
7714 	if (hcon->type != ACL_LINK)
7715 		goto free_skb;
7716 
7717 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7718 					ACL_LINK);
7719 	if (!chan)
7720 		goto free_skb;
7721 
7722 	BT_DBG("chan %p, len %d", chan, skb->len);
7723 
7724 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7725 		goto drop;
7726 
7727 	if (chan->imtu < skb->len)
7728 		goto drop;
7729 
7730 	/* Store remote BD_ADDR and PSM for msg_name */
7731 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7732 	bt_cb(skb)->l2cap.psm = psm;
7733 
7734 	if (!chan->ops->recv(chan, skb)) {
7735 		l2cap_chan_put(chan);
7736 		return;
7737 	}
7738 
7739 drop:
7740 	l2cap_chan_put(chan);
7741 free_skb:
7742 	kfree_skb(skb);
7743 }
7744 
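/* Dispatch a complete L2CAP frame by CID: the signaling, connectionless
 * and LE signaling channels are handled directly, everything else goes
 * through l2cap_data_channel(). Frames arriving before the HCI link is
 * fully connected are queued on pending_rx and replayed later.
 */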
7745 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7746 {
7747 	struct l2cap_hdr *lh = (void *) skb->data;
7748 	struct hci_conn *hcon = conn->hcon;
7749 	u16 cid, len;
7750 	__le16 psm;
7751 
7752 	if (hcon->state != BT_CONNECTED) {
7753 		BT_DBG("queueing pending rx skb");
7754 		skb_queue_tail(&conn->pending_rx, skb);
7755 		return;
7756 	}
7757 
7758 	skb_pull(skb, L2CAP_HDR_SIZE);
7759 	cid = __le16_to_cpu(lh->cid);
7760 	len = __le16_to_cpu(lh->len);
7761 
7762 	if (len != skb->len) {
7763 		kfree_skb(skb);
7764 		return;
7765 	}
7766 
7767 	/* Since we can't actively block incoming LE connections we must
7768 	 * at least ensure that we ignore incoming data from them.
7769 	 */
7770 	if (hcon->type == LE_LINK &&
7771 	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
7772 				   bdaddr_dst_type(hcon))) {
7773 		kfree_skb(skb);
7774 		return;
7775 	}
7776 
7777 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7778 
7779 	switch (cid) {
7780 	case L2CAP_CID_SIGNALING:
7781 		l2cap_sig_channel(conn, skb);
7782 		break;
7783 
7784 	case L2CAP_CID_CONN_LESS:
7785 		psm = get_unaligned((__le16 *) skb->data);
7786 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7787 		l2cap_conless_channel(conn, psm, skb);
7788 		break;
7789 
7790 	case L2CAP_CID_LE_SIGNALING:
7791 		l2cap_le_sig_channel(conn, skb);
7792 		break;
7793 
7794 	default:
7795 		l2cap_data_channel(conn, cid, skb);
7796 		break;
7797 	}
7798 }
7799 
7800 static void process_pending_rx(struct work_struct *work)
7801 {
7802 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7803 					       pending_rx_work);
7804 	struct sk_buff *skb;
7805 
7806 	BT_DBG("");
7807 
7808 	while ((skb = skb_dequeue(&conn->pending_rx)))
7809 		l2cap_recv_frame(conn, skb);
7810 }
7811 
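/* Create, or return the already existing, struct l2cap_conn for an
 * hci_conn: allocate the HCI channel, derive the MTU from the
 * controller's LE or ACL buffer size and initialise the fixed channel
 * mask, locks, lists and work items.
 */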
7812 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7813 {
7814 	struct l2cap_conn *conn = hcon->l2cap_data;
7815 	struct hci_chan *hchan;
7816 
7817 	if (conn)
7818 		return conn;
7819 
7820 	hchan = hci_chan_create(hcon);
7821 	if (!hchan)
7822 		return NULL;
7823 
7824 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7825 	if (!conn) {
7826 		hci_chan_del(hchan);
7827 		return NULL;
7828 	}
7829 
7830 	kref_init(&conn->ref);
7831 	hcon->l2cap_data = conn;
7832 	conn->hcon = hci_conn_get(hcon);
7833 	conn->hchan = hchan;
7834 
7835 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7836 
7837 	switch (hcon->type) {
7838 	case LE_LINK:
7839 		if (hcon->hdev->le_mtu) {
7840 			conn->mtu = hcon->hdev->le_mtu;
7841 			break;
7842 		}
7843 		fallthrough;
7844 	default:
7845 		conn->mtu = hcon->hdev->acl_mtu;
7846 		break;
7847 	}
7848 
7849 	conn->feat_mask = 0;
7850 
7851 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7852 
7853 	if (hcon->type == ACL_LINK &&
7854 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7855 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7856 
7857 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7858 	    (bredr_sc_enabled(hcon->hdev) ||
7859 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7860 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7861 
7862 	mutex_init(&conn->ident_lock);
7863 	mutex_init(&conn->chan_lock);
7864 
7865 	INIT_LIST_HEAD(&conn->chan_l);
7866 	INIT_LIST_HEAD(&conn->users);
7867 
7868 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7869 
7870 	skb_queue_head_init(&conn->pending_rx);
7871 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7872 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7873 
7874 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7875 
7876 	return conn;
7877 }
7878 
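/* BR/EDR PSMs must be odd with bit 0 of the upper octet clear (e.g.
 * 0x0001 and 0x1003 are valid, 0x0002 and 0x0101 are not), while LE
 * PSMs are restricted to the 0x0001-0x00ff range.
 */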
7879 static bool is_valid_psm(u16 psm, u8 dst_type)
7880 {
7881 	if (!psm)
7882 		return false;
7883 
7884 	if (bdaddr_type_is_le(dst_type))
7885 		return (psm <= 0x00ff);
7886 
7887 	/* PSM must be odd and lsb of upper byte must be 0 */
7888 	return ((psm & 0x0101) == 0x0001);
7889 }
7890 
7891 struct l2cap_chan_data {
7892 	struct l2cap_chan *chan;
7893 	struct pid *pid;
7894 	int count;
7895 };
7896 
7897 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7898 {
7899 	struct l2cap_chan_data *d = data;
7900 	struct pid *pid;
7901 
7902 	if (chan == d->chan)
7903 		return;
7904 
7905 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7906 		return;
7907 
7908 	pid = chan->ops->get_peer_pid(chan);
7909 
7910 	/* Only count deferred channels with the same PID/PSM */
7911 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7912 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7913 		return;
7914 
7915 	d->count++;
7916 }
7917 
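/* Initiate an outgoing connection on a channel: validate the PSM/CID
 * and channel mode, create the ACL or LE link through HCI, attach the
 * channel to the resulting l2cap_conn and either start the L2CAP
 * connect procedure or wait for the link to come up.
 */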
7918 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7919 		       bdaddr_t *dst, u8 dst_type)
7920 {
7921 	struct l2cap_conn *conn;
7922 	struct hci_conn *hcon;
7923 	struct hci_dev *hdev;
7924 	int err;
7925 
7926 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7927 	       dst, dst_type, __le16_to_cpu(psm), chan->mode);
7928 
7929 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7930 	if (!hdev)
7931 		return -EHOSTUNREACH;
7932 
7933 	hci_dev_lock(hdev);
7934 
7935 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7936 	    chan->chan_type != L2CAP_CHAN_RAW) {
7937 		err = -EINVAL;
7938 		goto done;
7939 	}
7940 
7941 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7942 		err = -EINVAL;
7943 		goto done;
7944 	}
7945 
7946 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7947 		err = -EINVAL;
7948 		goto done;
7949 	}
7950 
7951 	switch (chan->mode) {
7952 	case L2CAP_MODE_BASIC:
7953 		break;
7954 	case L2CAP_MODE_LE_FLOWCTL:
7955 		break;
7956 	case L2CAP_MODE_EXT_FLOWCTL:
7957 		if (!enable_ecred) {
7958 			err = -EOPNOTSUPP;
7959 			goto done;
7960 		}
7961 		break;
7962 	case L2CAP_MODE_ERTM:
7963 	case L2CAP_MODE_STREAMING:
7964 		if (!disable_ertm)
7965 			break;
7966 		fallthrough;
7967 	default:
7968 		err = -EOPNOTSUPP;
7969 		goto done;
7970 	}
7971 
7972 	switch (chan->state) {
7973 	case BT_CONNECT:
7974 	case BT_CONNECT2:
7975 	case BT_CONFIG:
7976 		/* Already connecting */
7977 		err = 0;
7978 		goto done;
7979 
7980 	case BT_CONNECTED:
7981 		/* Already connected */
7982 		err = -EISCONN;
7983 		goto done;
7984 
7985 	case BT_OPEN:
7986 	case BT_BOUND:
7987 		/* Can connect */
7988 		break;
7989 
7990 	default:
7991 		err = -EBADFD;
7992 		goto done;
7993 	}
7994 
7995 	/* Set destination address and psm */
7996 	bacpy(&chan->dst, dst);
7997 	chan->dst_type = dst_type;
7998 
7999 	chan->psm = psm;
8000 	chan->dcid = cid;
8001 
8002 	if (bdaddr_type_is_le(dst_type)) {
8003 		/* Convert from L2CAP channel address type to HCI address type
8004 		 */
8005 		if (dst_type == BDADDR_LE_PUBLIC)
8006 			dst_type = ADDR_LE_DEV_PUBLIC;
8007 		else
8008 			dst_type = ADDR_LE_DEV_RANDOM;
8009 
8010 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8011 			hcon = hci_connect_le(hdev, dst, dst_type, false,
8012 					      chan->sec_level,
8013 					      HCI_LE_CONN_TIMEOUT,
8014 					      HCI_ROLE_SLAVE);
8015 		else
8016 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
8017 						   chan->sec_level,
8018 						   HCI_LE_CONN_TIMEOUT,
8019 						   CONN_REASON_L2CAP_CHAN);
8020 
8021 	} else {
8022 		u8 auth_type = l2cap_get_auth_type(chan);
8023 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
8024 				       CONN_REASON_L2CAP_CHAN);
8025 	}
8026 
8027 	if (IS_ERR(hcon)) {
8028 		err = PTR_ERR(hcon);
8029 		goto done;
8030 	}
8031 
8032 	conn = l2cap_conn_add(hcon);
8033 	if (!conn) {
8034 		hci_conn_drop(hcon);
8035 		err = -ENOMEM;
8036 		goto done;
8037 	}
8038 
8039 	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
8040 		struct l2cap_chan_data data;
8041 
8042 		data.chan = chan;
8043 		data.pid = chan->ops->get_peer_pid(chan);
8044 		data.count = 1;
8045 
8046 		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
8047 
8048 		/* Check that there aren't too many channels being connected */
8049 		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
8050 			hci_conn_drop(hcon);
8051 			err = -EPROTO;
8052 			goto done;
8053 		}
8054 	}
8055 
8056 	mutex_lock(&conn->chan_lock);
8057 	l2cap_chan_lock(chan);
8058 
8059 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
8060 		hci_conn_drop(hcon);
8061 		err = -EBUSY;
8062 		goto chan_unlock;
8063 	}
8064 
8065 	/* Update source addr of the socket */
8066 	bacpy(&chan->src, &hcon->src);
8067 	chan->src_type = bdaddr_src_type(hcon);
8068 
8069 	__l2cap_chan_add(conn, chan);
8070 
8071 	/* l2cap_chan_add takes its own ref so we can drop this one */
8072 	hci_conn_drop(hcon);
8073 
8074 	l2cap_state_change(chan, BT_CONNECT);
8075 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
8076 
8077 	/* Release chan->sport so that it can be reused by other
8078 	 * sockets (as it's only used for listening sockets).
8079 	 */
8080 	write_lock(&chan_list_lock);
8081 	chan->sport = 0;
8082 	write_unlock(&chan_list_lock);
8083 
8084 	if (hcon->state == BT_CONNECTED) {
8085 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
8086 			__clear_chan_timer(chan);
8087 			if (l2cap_chan_check_security(chan, true))
8088 				l2cap_state_change(chan, BT_CONNECTED);
8089 		} else
8090 			l2cap_do_start(chan);
8091 	}
8092 
8093 	err = 0;
8094 
8095 chan_unlock:
8096 	l2cap_chan_unlock(chan);
8097 	mutex_unlock(&conn->chan_lock);
8098 done:
8099 	hci_dev_unlock(hdev);
8100 	hci_dev_put(hdev);
8101 	return err;
8102 }
8103 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
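/* A rough sketch of how an in-kernel user might bring up an LE
 * connection oriented channel with this exported API; the ops table
 * and the PSM value below are purely illustrative and not defined in
 * this file:
 *
 *	struct l2cap_chan *chan = l2cap_chan_create();
 *
 *	if (!chan)
 *		return -ENOMEM;
 *
 *	l2cap_chan_set_defaults(chan);
 *	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
 *	chan->mode = L2CAP_MODE_LE_FLOWCTL;
 *	chan->imtu = 1280;
 *	chan->ops = &my_chan_ops;	(hypothetical struct l2cap_ops)
 *
 *	err = l2cap_chan_connect(chan, cpu_to_le16(0x0080), 0, &dst,
 *				 BDADDR_LE_PUBLIC);
 */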
8104 
8105 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8106 {
8107 	struct l2cap_conn *conn = chan->conn;
8108 	struct {
8109 		struct l2cap_ecred_reconf_req req;
8110 		__le16 scid;
8111 	} pdu;
8112 
8113 	pdu.req.mtu = cpu_to_le16(chan->imtu);
8114 	pdu.req.mps = cpu_to_le16(chan->mps);
8115 	pdu.scid    = cpu_to_le16(chan->scid);
8116 
8117 	chan->ident = l2cap_get_ident(conn);
8118 
8119 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8120 		       sizeof(pdu), &pdu);
8121 }
8122 
8123 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8124 {
8125 	if (chan->imtu > mtu)
8126 		return -EINVAL;
8127 
8128 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8129 
8130 	chan->imtu = mtu;
8131 
8132 	l2cap_ecred_reconfigure(chan);
8133 
8134 	return 0;
8135 }
8136 
8137 /* ---- L2CAP interface with lower layer (HCI) ---- */
8138 
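/* Called by the HCI layer when a remote device requests an ACL
 * connection: scan the listening channels bound to this controller (or
 * to BDADDR_ANY) and report whether to accept and whether to require
 * the master role.
 */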
8139 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8140 {
8141 	int exact = 0, lm1 = 0, lm2 = 0;
8142 	struct l2cap_chan *c;
8143 
8144 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8145 
8146 	/* Find listening sockets and check their link_mode */
8147 	read_lock(&chan_list_lock);
8148 	list_for_each_entry(c, &chan_list, global_l) {
8149 		if (c->state != BT_LISTEN)
8150 			continue;
8151 
8152 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8153 			lm1 |= HCI_LM_ACCEPT;
8154 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8155 				lm1 |= HCI_LM_MASTER;
8156 			exact++;
8157 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8158 			lm2 |= HCI_LM_ACCEPT;
8159 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8160 				lm2 |= HCI_LM_MASTER;
8161 		}
8162 	}
8163 	read_unlock(&chan_list_lock);
8164 
8165 	return exact ? lm1 : lm2;
8166 }
8167 
8168 /* Find the next fixed channel in BT_LISTEN state, continue iteration
8169  * from an existing channel in the list or from the beginning of the
8170  * global list (by passing NULL as first parameter).
8171  */
8172 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8173 						  struct hci_conn *hcon)
8174 {
8175 	u8 src_type = bdaddr_src_type(hcon);
8176 
8177 	read_lock(&chan_list_lock);
8178 
8179 	if (c)
8180 		c = list_next_entry(c, global_l);
8181 	else
8182 		c = list_entry(chan_list.next, typeof(*c), global_l);
8183 
8184 	list_for_each_entry_from(c, &chan_list, global_l) {
8185 		if (c->chan_type != L2CAP_CHAN_FIXED)
8186 			continue;
8187 		if (c->state != BT_LISTEN)
8188 			continue;
8189 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8190 			continue;
8191 		if (src_type != c->src_type)
8192 			continue;
8193 
8194 		c = l2cap_chan_hold_unless_zero(c);
8195 		read_unlock(&chan_list_lock);
8196 		return c;
8197 	}
8198 
8199 	read_unlock(&chan_list_lock);
8200 
8201 	return NULL;
8202 }
8203 
8204 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8205 {
8206 	struct hci_dev *hdev = hcon->hdev;
8207 	struct l2cap_conn *conn;
8208 	struct l2cap_chan *pchan;
8209 	u8 dst_type;
8210 
8211 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8212 		return;
8213 
8214 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
8215 
8216 	if (status) {
8217 		l2cap_conn_del(hcon, bt_to_errno(status));
8218 		return;
8219 	}
8220 
8221 	conn = l2cap_conn_add(hcon);
8222 	if (!conn)
8223 		return;
8224 
8225 	dst_type = bdaddr_dst_type(hcon);
8226 
8227 	/* If device is blocked, do not create channels for it */
8228 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
8229 		return;
8230 
8231 	/* Find fixed channels and notify them of the new connection. We
8232 	 * use multiple individual lookups, continuing each time where
8233 	 * we left off, because the list lock would prevent calling the
8234 	 * potentially sleeping l2cap_chan_lock() function.
8235 	 */
8236 	pchan = l2cap_global_fixed_chan(NULL, hcon);
8237 	while (pchan) {
8238 		struct l2cap_chan *chan, *next;
8239 
8240 		/* Client fixed channels should override server ones */
8241 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8242 			goto next;
8243 
8244 		l2cap_chan_lock(pchan);
8245 		chan = pchan->ops->new_connection(pchan);
8246 		if (chan) {
8247 			bacpy(&chan->src, &hcon->src);
8248 			bacpy(&chan->dst, &hcon->dst);
8249 			chan->src_type = bdaddr_src_type(hcon);
8250 			chan->dst_type = dst_type;
8251 
8252 			__l2cap_chan_add(conn, chan);
8253 		}
8254 
8255 		l2cap_chan_unlock(pchan);
8256 next:
8257 		next = l2cap_global_fixed_chan(pchan, hcon);
8258 		l2cap_chan_put(pchan);
8259 		pchan = next;
8260 	}
8261 
8262 	l2cap_conn_ready(conn);
8263 }
8264 
8265 int l2cap_disconn_ind(struct hci_conn *hcon)
8266 {
8267 	struct l2cap_conn *conn = hcon->l2cap_data;
8268 
8269 	BT_DBG("hcon %p", hcon);
8270 
8271 	if (!conn)
8272 		return HCI_ERROR_REMOTE_USER_TERM;
8273 	return conn->disc_reason;
8274 }
8275 
8276 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8277 {
8278 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8279 		return;
8280 
8281 	BT_DBG("hcon %p reason %d", hcon, reason);
8282 
8283 	l2cap_conn_del(hcon, bt_to_errno(reason));
8284 }
8285 
8286 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8287 {
8288 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8289 		return;
8290 
8291 	if (encrypt == 0x00) {
8292 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8293 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8294 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8295 			   chan->sec_level == BT_SECURITY_FIPS)
8296 			l2cap_chan_close(chan, ECONNREFUSED);
8297 	} else {
8298 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8299 			__clear_chan_timer(chan);
8300 	}
8301 }
8302 
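/* HCI callback run when authentication/encryption of the link changes:
 * walk every channel on the connection and, depending on its state and
 * the result, resume it, restart the connect procedure, send the
 * pending Connect Response or arm the disconnect timer.
 */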
8303 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8304 {
8305 	struct l2cap_conn *conn = hcon->l2cap_data;
8306 	struct l2cap_chan *chan;
8307 
8308 	if (!conn)
8309 		return;
8310 
8311 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8312 
8313 	mutex_lock(&conn->chan_lock);
8314 
8315 	list_for_each_entry(chan, &conn->chan_l, list) {
8316 		l2cap_chan_lock(chan);
8317 
8318 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8319 		       state_to_string(chan->state));
8320 
8321 		if (chan->scid == L2CAP_CID_A2MP) {
8322 			l2cap_chan_unlock(chan);
8323 			continue;
8324 		}
8325 
8326 		if (!status && encrypt)
8327 			chan->sec_level = hcon->sec_level;
8328 
8329 		if (!__l2cap_no_conn_pending(chan)) {
8330 			l2cap_chan_unlock(chan);
8331 			continue;
8332 		}
8333 
8334 		if (!status && (chan->state == BT_CONNECTED ||
8335 				chan->state == BT_CONFIG)) {
8336 			chan->ops->resume(chan);
8337 			l2cap_check_encryption(chan, encrypt);
8338 			l2cap_chan_unlock(chan);
8339 			continue;
8340 		}
8341 
8342 		if (chan->state == BT_CONNECT) {
8343 			if (!status && l2cap_check_enc_key_size(hcon))
8344 				l2cap_start_connection(chan);
8345 			else
8346 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8347 		} else if (chan->state == BT_CONNECT2 &&
8348 			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8349 			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8350 			struct l2cap_conn_rsp rsp;
8351 			__u16 res, stat;
8352 
8353 			if (!status && l2cap_check_enc_key_size(hcon)) {
8354 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8355 					res = L2CAP_CR_PEND;
8356 					stat = L2CAP_CS_AUTHOR_PEND;
8357 					chan->ops->defer(chan);
8358 				} else {
8359 					l2cap_state_change(chan, BT_CONFIG);
8360 					res = L2CAP_CR_SUCCESS;
8361 					stat = L2CAP_CS_NO_INFO;
8362 				}
8363 			} else {
8364 				l2cap_state_change(chan, BT_DISCONN);
8365 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8366 				res = L2CAP_CR_SEC_BLOCK;
8367 				stat = L2CAP_CS_NO_INFO;
8368 			}
8369 
8370 			rsp.scid   = cpu_to_le16(chan->dcid);
8371 			rsp.dcid   = cpu_to_le16(chan->scid);
8372 			rsp.result = cpu_to_le16(res);
8373 			rsp.status = cpu_to_le16(stat);
8374 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
8375 				       sizeof(rsp), &rsp);
8376 
8377 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8378 			    res == L2CAP_CR_SUCCESS) {
8379 				char buf[128];
8380 				set_bit(CONF_REQ_SENT, &chan->conf_state);
8381 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
8382 					       L2CAP_CONF_REQ,
8383 					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
8384 					       buf);
8385 				chan->num_conf_req++;
8386 			}
8387 		}
8388 
8389 		l2cap_chan_unlock(chan);
8390 	}
8391 
8392 	mutex_unlock(&conn->chan_lock);
8393 }
8394 
8395 /* Append fragment into frame respecting the maximum len of rx_skb */
8396 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
8397 			   u16 len)
8398 {
8399 	if (!conn->rx_skb) {
8400 		/* Allocate skb for the complete frame (with header) */
8401 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8402 		if (!conn->rx_skb)
8403 			return -ENOMEM;
8404 		/* Init rx_len */
8405 		conn->rx_len = len;
8406 	}
8407 
8408 	/* Copy as much as the rx_skb can hold */
8409 	len = min_t(u16, len, skb->len);
8410 	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
8411 	skb_pull(skb, len);
8412 	conn->rx_len -= len;
8413 
8414 	return len;
8415 }
8416 
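/* Used for continuation fragments that arrive before the 2-byte L2CAP
 * length field is complete: finish reading the length and, if the frame
 * will not fit in the skb that was allocated with the conn->mtu guess,
 * reallocate rx_skb with the exact expected size.
 */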
8417 static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
8418 {
8419 	struct sk_buff *rx_skb;
8420 	int len;
8421 
8422 	/* Append just enough to complete the header */
8423 	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);
8424 
8425 	/* If the header could not be read, just continue */
8426 	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
8427 		return len;
8428 
8429 	rx_skb = conn->rx_skb;
8430 	len = get_unaligned_le16(rx_skb->data);
8431 
8432 	/* Check if rx_skb has enough space to receive all fragments */
8433 	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
8434 		/* Update expected len */
8435 		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
8436 		return L2CAP_LEN_SIZE;
8437 	}
8438 
8439 	/* Reset conn->rx_skb since it will need to be reallocated in order to
8440 	 * fit all fragments.
8441 	 */
8442 	conn->rx_skb = NULL;
8443 
8444 	/* Reallocates rx_skb using the exact expected length */
8445 	len = l2cap_recv_frag(conn, rx_skb,
8446 			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
8447 	kfree_skb(rx_skb);
8448 
8449 	return len;
8450 }
8451 
8452 static void l2cap_recv_reset(struct l2cap_conn *conn)
8453 {
8454 	kfree_skb(conn->rx_skb);
8455 	conn->rx_skb = NULL;
8456 	conn->rx_len = 0;
8457 }
8458 
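/* Entry point for ACL data from the HCI core: reassemble L2CAP frames
 * from ACL start/continuation fragments, using the basic header length
 * to tell when a frame is complete, and feed complete frames to
 * l2cap_recv_frame().
 */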
8459 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8460 {
8461 	struct l2cap_conn *conn = hcon->l2cap_data;
8462 	int len;
8463 
8464 	/* For AMP controller do not create l2cap conn */
8465 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8466 		goto drop;
8467 
8468 	if (!conn)
8469 		conn = l2cap_conn_add(hcon);
8470 
8471 	if (!conn)
8472 		goto drop;
8473 
8474 	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
8475 
8476 	switch (flags) {
8477 	case ACL_START:
8478 	case ACL_START_NO_FLUSH:
8479 	case ACL_COMPLETE:
8480 		if (conn->rx_skb) {
8481 			BT_ERR("Unexpected start frame (len %d)", skb->len);
8482 			l2cap_recv_reset(conn);
8483 			l2cap_conn_unreliable(conn, ECOMM);
8484 		}
8485 
8486 		/* Start fragment may not contain the L2CAP length so just
8487 		 * copy the initial byte when that happens and use conn->mtu as
8488 		 * expected length.
8489 		 */
8490 		if (skb->len < L2CAP_LEN_SIZE) {
8491 			l2cap_recv_frag(conn, skb, conn->mtu);
8492 			break;
8493 		}
8494 
8495 		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
8496 
8497 		if (len == skb->len) {
8498 			/* Complete frame received */
8499 			l2cap_recv_frame(conn, skb);
8500 			return;
8501 		}
8502 
8503 		BT_DBG("Start: total len %d, frag len %u", len, skb->len);
8504 
8505 		if (skb->len > len) {
8506 			BT_ERR("Frame is too long (len %u, expected len %d)",
8507 			       skb->len, len);
8508 			l2cap_conn_unreliable(conn, ECOMM);
8509 			goto drop;
8510 		}
8511 
8512 		/* Append fragment into frame (with header) */
8513 		if (l2cap_recv_frag(conn, skb, len) < 0)
8514 			goto drop;
8515 
8516 		break;
8517 
8518 	case ACL_CONT:
8519 		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);
8520 
8521 		if (!conn->rx_skb) {
8522 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8523 			l2cap_conn_unreliable(conn, ECOMM);
8524 			goto drop;
8525 		}
8526 
8527 		/* Complete the L2CAP length if it has not been read */
8528 		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
8529 			if (l2cap_recv_len(conn, skb) < 0) {
8530 				l2cap_conn_unreliable(conn, ECOMM);
8531 				goto drop;
8532 			}
8533 
8534 			/* If the header still could not be read, just continue */
8535 			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
8536 				break;
8537 		}
8538 
8539 		if (skb->len > conn->rx_len) {
8540 			BT_ERR("Fragment is too long (len %u, expected %u)",
8541 			       skb->len, conn->rx_len);
8542 			l2cap_recv_reset(conn);
8543 			l2cap_conn_unreliable(conn, ECOMM);
8544 			goto drop;
8545 		}
8546 
8547 		/* Append fragment into frame (with header) */
8548 		l2cap_recv_frag(conn, skb, skb->len);
8549 
8550 		if (!conn->rx_len) {
8551 			/* Complete frame received. l2cap_recv_frame
8552 			 * takes ownership of the skb so set the global
8553 			 * rx_skb pointer to NULL first.
8554 			 */
8555 			struct sk_buff *rx_skb = conn->rx_skb;
8556 			conn->rx_skb = NULL;
8557 			l2cap_recv_frame(conn, rx_skb);
8558 		}
8559 		break;
8560 	}
8561 
8562 drop:
8563 	kfree_skb(skb);
8564 }
8565 
8566 static struct hci_cb l2cap_cb = {
8567 	.name		= "L2CAP",
8568 	.connect_cfm	= l2cap_connect_cfm,
8569 	.disconn_cfm	= l2cap_disconn_cfm,
8570 	.security_cfm	= l2cap_security_cfm,
8571 };
8572 
8573 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8574 {
8575 	struct l2cap_chan *c;
8576 
8577 	read_lock(&chan_list_lock);
8578 
8579 	list_for_each_entry(c, &chan_list, global_l) {
8580 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8581 			   &c->src, c->src_type, &c->dst, c->dst_type,
8582 			   c->state, __le16_to_cpu(c->psm),
8583 			   c->scid, c->dcid, c->imtu, c->omtu,
8584 			   c->sec_level, c->mode);
8585 	}
8586 
8587 	read_unlock(&chan_list_lock);
8588 
8589 	return 0;
8590 }
8591 
8592 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
8593 
8594 static struct dentry *l2cap_debugfs;
8595 
8596 int __init l2cap_init(void)
8597 {
8598 	int err;
8599 
8600 	err = l2cap_init_sockets();
8601 	if (err < 0)
8602 		return err;
8603 
8604 	hci_register_cb(&l2cap_cb);
8605 
8606 	if (IS_ERR_OR_NULL(bt_debugfs))
8607 		return 0;
8608 
8609 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8610 					    NULL, &l2cap_debugfs_fops);
8611 
8612 	return 0;
8613 }
8614 
8615 void l2cap_exit(void)
8616 {
8617 	debugfs_remove(l2cap_debugfs);
8618 	hci_unregister_cb(&l2cap_cb);
8619 	l2cap_cleanup_sockets();
8620 }
8621 
8622 module_param(disable_ertm, bool, 0644);
8623 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8624 
8625 module_param(enable_ecred, bool, 0644);
8626 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8627