1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 
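/* Map an HCI link type and HCI address type to the corresponding
 * BDADDR_* address type exposed to L2CAP users.
 */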
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
66 {
67 	if (link_type == LE_LINK) {
68 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
69 			return BDADDR_LE_PUBLIC;
70 		else
71 			return BDADDR_LE_RANDOM;
72 	}
73 
74 	return BDADDR_BREDR;
75 }
76 
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
78 {
79 	return bdaddr_type(hcon->type, hcon->src_type);
80 }
81 
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
83 {
84 	return bdaddr_type(hcon->type, hcon->dst_type);
85 }
86 
87 /* ---- L2CAP channels ---- */
88 
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
90 						   u16 cid)
91 {
92 	struct l2cap_chan *c;
93 
94 	list_for_each_entry(c, &conn->chan_l, list) {
95 		if (c->dcid == cid)
96 			return c;
97 	}
98 	return NULL;
99 }
100 
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
102 						   u16 cid)
103 {
104 	struct l2cap_chan *c;
105 
106 	list_for_each_entry(c, &conn->chan_l, list) {
107 		if (c->scid == cid)
108 			return c;
109 	}
110 	return NULL;
111 }
112 
113 /* Find channel with given SCID.
114  * Returns a reference locked channel.
115  */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
117 						 u16 cid)
118 {
119 	struct l2cap_chan *c;
120 
121 	mutex_lock(&conn->chan_lock);
122 	c = __l2cap_get_chan_by_scid(conn, cid);
123 	if (c) {
124 		/* Only lock if chan reference is not 0 */
125 		c = l2cap_chan_hold_unless_zero(c);
126 		if (c)
127 			l2cap_chan_lock(c);
128 	}
129 	mutex_unlock(&conn->chan_lock);
130 
131 	return c;
132 }
133 
134 /* Find channel with given DCID.
135  * Returns a reference locked channel.
136  */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
138 						 u16 cid)
139 {
140 	struct l2cap_chan *c;
141 
142 	mutex_lock(&conn->chan_lock);
143 	c = __l2cap_get_chan_by_dcid(conn, cid);
144 	if (c) {
145 		/* Only lock if chan reference is not 0 */
146 		c = l2cap_chan_hold_unless_zero(c);
147 		if (c)
148 			l2cap_chan_lock(c);
149 	}
150 	mutex_unlock(&conn->chan_lock);
151 
152 	return c;
153 }
154 
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
156 						    u8 ident)
157 {
158 	struct l2cap_chan *c;
159 
160 	list_for_each_entry(c, &conn->chan_l, list) {
161 		if (c->ident == ident)
162 			return c;
163 	}
164 	return NULL;
165 }
166 
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
168 						  u8 ident)
169 {
170 	struct l2cap_chan *c;
171 
172 	mutex_lock(&conn->chan_lock);
173 	c = __l2cap_get_chan_by_ident(conn, ident);
174 	if (c) {
175 		/* Only lock if chan reference is not 0 */
176 		c = l2cap_chan_hold_unless_zero(c);
177 		if (c)
178 			l2cap_chan_lock(c);
179 	}
180 	mutex_unlock(&conn->chan_lock);
181 
182 	return c;
183 }
184 
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
186 						      u8 src_type)
187 {
188 	struct l2cap_chan *c;
189 
190 	list_for_each_entry(c, &chan_list, global_l) {
191 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
192 			continue;
193 
194 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
195 			continue;
196 
197 		if (c->sport == psm && !bacmp(&c->src, src))
198 			return c;
199 	}
200 	return NULL;
201 }
202 
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
204 {
205 	int err;
206 
207 	write_lock(&chan_list_lock);
208 
209 	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
210 		err = -EADDRINUSE;
211 		goto done;
212 	}
213 
214 	if (psm) {
215 		chan->psm = psm;
216 		chan->sport = psm;
217 		err = 0;
218 	} else {
219 		u16 p, start, end, incr;
220 
221 		if (chan->src_type == BDADDR_BREDR) {
222 			start = L2CAP_PSM_DYN_START;
223 			end = L2CAP_PSM_AUTO_END;
224 			incr = 2;
225 		} else {
226 			start = L2CAP_PSM_LE_DYN_START;
227 			end = L2CAP_PSM_LE_DYN_END;
228 			incr = 1;
229 		}
230 
231 		err = -EINVAL;
232 		for (p = start; p <= end; p += incr)
233 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
234 							 chan->src_type)) {
235 				chan->psm   = cpu_to_le16(p);
236 				chan->sport = cpu_to_le16(p);
237 				err = 0;
238 				break;
239 			}
240 	}
241 
242 done:
243 	write_unlock(&chan_list_lock);
244 	return err;
245 }
246 EXPORT_SYMBOL_GPL(l2cap_add_psm);
247 
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
249 {
250 	write_lock(&chan_list_lock);
251 
252 	/* Override the defaults (which are for conn-oriented) */
253 	chan->omtu = L2CAP_DEFAULT_MTU;
254 	chan->chan_type = L2CAP_CHAN_FIXED;
255 
256 	chan->scid = scid;
257 
258 	write_unlock(&chan_list_lock);
259 
260 	return 0;
261 }
262 
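/* Allocate the first free dynamic CID on this connection, or return 0
 * if the dynamic range is exhausted.
 */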
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
264 {
265 	u16 cid, dyn_end;
266 
267 	if (conn->hcon->type == LE_LINK)
268 		dyn_end = L2CAP_CID_LE_DYN_END;
269 	else
270 		dyn_end = L2CAP_CID_DYN_END;
271 
272 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
273 		if (!__l2cap_get_chan_by_scid(conn, cid))
274 			return cid;
275 	}
276 
277 	return 0;
278 }
279 
static void l2cap_state_change(struct l2cap_chan *chan, int state)
281 {
282 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
283 	       state_to_string(state));
284 
285 	chan->state = state;
286 	chan->ops->state_change(chan, state, 0);
287 }
288 
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
290 						int state, int err)
291 {
292 	chan->state = state;
293 	chan->ops->state_change(chan, chan->state, err);
294 }
295 
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
297 {
298 	chan->ops->state_change(chan, chan->state, err);
299 }
300 
static void __set_retrans_timer(struct l2cap_chan *chan)
302 {
303 	if (!delayed_work_pending(&chan->monitor_timer) &&
304 	    chan->retrans_timeout) {
305 		l2cap_set_timer(chan, &chan->retrans_timer,
306 				msecs_to_jiffies(chan->retrans_timeout));
307 	}
308 }
309 
static void __set_monitor_timer(struct l2cap_chan *chan)
311 {
312 	__clear_retrans_timer(chan);
313 	if (chan->monitor_timeout) {
314 		l2cap_set_timer(chan, &chan->monitor_timer,
315 				msecs_to_jiffies(chan->monitor_timeout));
316 	}
317 }
318 
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
320 					       u16 seq)
321 {
322 	struct sk_buff *skb;
323 
324 	skb_queue_walk(head, skb) {
325 		if (bt_cb(skb)->l2cap.txseq == seq)
326 			return skb;
327 	}
328 
329 	return NULL;
330 }
331 
332 /* ---- L2CAP sequence number lists ---- */
333 
334 /* For ERTM, ordered lists of sequence numbers must be tracked for
335  * SREJ requests that are received and for frames that are to be
336  * retransmitted. These seq_list functions implement a singly-linked
337  * list in an array, where membership in the list can also be checked
338  * in constant time. Items can also be added to the tail of the list
339  * and removed from the head in constant time, without further memory
340  * allocs or frees.
341  */
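/* Example, starting from an empty list: after l2cap_seq_list_append(&list, 5)
 * and then (&list, 12), head == 5 and list[12 & mask] == L2CAP_SEQ_LIST_TAIL;
 * a subsequent l2cap_seq_list_pop() returns 5 and advances head to 12.
 */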
342 
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
344 {
345 	size_t alloc_size, i;
346 
347 	/* Allocated size is a power of 2 to map sequence numbers
348 	 * (which may be up to 14 bits) in to a smaller array that is
349 	 * sized for the negotiated ERTM transmit windows.
350 	 */
351 	alloc_size = roundup_pow_of_two(size);
352 
353 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
354 	if (!seq_list->list)
355 		return -ENOMEM;
356 
357 	seq_list->mask = alloc_size - 1;
358 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
359 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
360 	for (i = 0; i < alloc_size; i++)
361 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
362 
363 	return 0;
364 }
365 
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
367 {
368 	kfree(seq_list->list);
369 }
370 
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
372 					   u16 seq)
373 {
374 	/* Constant-time check for list membership */
375 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
376 }
377 
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
379 {
380 	u16 seq = seq_list->head;
381 	u16 mask = seq_list->mask;
382 
383 	seq_list->head = seq_list->list[seq & mask];
384 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
385 
386 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
387 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
388 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
389 	}
390 
391 	return seq;
392 }
393 
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
395 {
396 	u16 i;
397 
398 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
399 		return;
400 
401 	for (i = 0; i <= seq_list->mask; i++)
402 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
403 
404 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
405 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
406 }
407 
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
409 {
410 	u16 mask = seq_list->mask;
411 
412 	/* All appends happen in constant time */
413 
414 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
415 		return;
416 
417 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
418 		seq_list->head = seq;
419 	else
420 		seq_list->list[seq_list->tail & mask] = seq;
421 
422 	seq_list->tail = seq;
423 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
424 }
425 
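/* Channel timer expiry: close the channel with a state-dependent error
 * (ECONNREFUSED or ETIMEDOUT) and drop the reference that was taken when
 * the timer was scheduled.
 */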
static void l2cap_chan_timeout(struct work_struct *work)
427 {
428 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
429 					       chan_timer.work);
430 	struct l2cap_conn *conn = chan->conn;
431 	int reason;
432 
433 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
434 
435 	mutex_lock(&conn->chan_lock);
436 	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
437 	 * this work. No need to call l2cap_chan_hold(chan) here again.
438 	 */
439 	l2cap_chan_lock(chan);
440 
441 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
442 		reason = ECONNREFUSED;
443 	else if (chan->state == BT_CONNECT &&
444 		 chan->sec_level != BT_SECURITY_SDP)
445 		reason = ECONNREFUSED;
446 	else
447 		reason = ETIMEDOUT;
448 
449 	l2cap_chan_close(chan, reason);
450 
451 	chan->ops->close(chan);
452 
453 	l2cap_chan_unlock(chan);
454 	l2cap_chan_put(chan);
455 
456 	mutex_unlock(&conn->chan_lock);
457 }
458 
struct l2cap_chan *l2cap_chan_create(void)
460 {
461 	struct l2cap_chan *chan;
462 
463 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
464 	if (!chan)
465 		return NULL;
466 
467 	skb_queue_head_init(&chan->tx_q);
468 	skb_queue_head_init(&chan->srej_q);
469 	mutex_init(&chan->lock);
470 
471 	/* Set default lock nesting level */
472 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
473 
474 	write_lock(&chan_list_lock);
475 	list_add(&chan->global_l, &chan_list);
476 	write_unlock(&chan_list_lock);
477 
478 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
479 
480 	chan->state = BT_OPEN;
481 
482 	kref_init(&chan->kref);
483 
484 	/* This flag is cleared in l2cap_chan_ready() */
485 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
486 
487 	BT_DBG("chan %p", chan);
488 
489 	return chan;
490 }
491 EXPORT_SYMBOL_GPL(l2cap_chan_create);
492 
static void l2cap_chan_destroy(struct kref *kref)
494 {
495 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
496 
497 	BT_DBG("chan %p", chan);
498 
499 	write_lock(&chan_list_lock);
500 	list_del(&chan->global_l);
501 	write_unlock(&chan_list_lock);
502 
503 	kfree(chan);
504 }
505 
void l2cap_chan_hold(struct l2cap_chan *c)
507 {
508 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
509 
510 	kref_get(&c->kref);
511 }
512 
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
514 {
515 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
516 
517 	if (!kref_get_unless_zero(&c->kref))
518 		return NULL;
519 
520 	return c;
521 }
522 
void l2cap_chan_put(struct l2cap_chan *c)
524 {
525 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
526 
527 	kref_put(&c->kref, l2cap_chan_destroy);
528 }
529 EXPORT_SYMBOL_GPL(l2cap_chan_put);
530 
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
532 {
533 	chan->fcs  = L2CAP_FCS_CRC16;
534 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
535 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
536 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
537 	chan->remote_max_tx = chan->max_tx;
538 	chan->remote_tx_win = chan->tx_win;
539 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
540 	chan->sec_level = BT_SECURITY_LOW;
541 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
542 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
543 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
544 
545 	chan->conf_state = 0;
546 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
547 
548 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
549 }
550 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
551 
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
553 {
554 	chan->sdu = NULL;
555 	chan->sdu_last_frag = NULL;
556 	chan->sdu_len = 0;
557 	chan->tx_credits = tx_credits;
558 	/* Derive MPS from connection MTU to stop HCI fragmentation */
559 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
560 	/* Give enough credits for a full packet */
561 	chan->rx_credits = (chan->imtu / chan->mps) + 1;
562 
563 	skb_queue_head_init(&chan->tx_q);
564 }
565 
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
567 {
568 	l2cap_le_flowctl_init(chan, tx_credits);
569 
570 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
571 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
572 		chan->mps = L2CAP_ECRED_MIN_MPS;
573 		chan->rx_credits = (chan->imtu / chan->mps) + 1;
574 	}
575 }
576 
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
578 {
579 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
580 	       __le16_to_cpu(chan->psm), chan->dcid);
581 
582 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
583 
584 	chan->conn = conn;
585 
586 	switch (chan->chan_type) {
587 	case L2CAP_CHAN_CONN_ORIENTED:
588 		/* Alloc CID for connection-oriented socket */
589 		chan->scid = l2cap_alloc_cid(conn);
590 		if (conn->hcon->type == ACL_LINK)
591 			chan->omtu = L2CAP_DEFAULT_MTU;
592 		break;
593 
594 	case L2CAP_CHAN_CONN_LESS:
595 		/* Connectionless socket */
596 		chan->scid = L2CAP_CID_CONN_LESS;
597 		chan->dcid = L2CAP_CID_CONN_LESS;
598 		chan->omtu = L2CAP_DEFAULT_MTU;
599 		break;
600 
601 	case L2CAP_CHAN_FIXED:
602 		/* Caller will set CID and CID specific MTU values */
603 		break;
604 
605 	default:
606 		/* Raw socket can send/recv signalling messages only */
607 		chan->scid = L2CAP_CID_SIGNALING;
608 		chan->dcid = L2CAP_CID_SIGNALING;
609 		chan->omtu = L2CAP_DEFAULT_MTU;
610 	}
611 
612 	chan->local_id		= L2CAP_BESTEFFORT_ID;
613 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
614 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
615 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
616 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
617 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
618 
619 	l2cap_chan_hold(chan);
620 
621 	/* Only keep a reference for fixed channels if they requested it */
622 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
623 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
624 		hci_conn_hold(conn->hcon);
625 
626 	list_add(&chan->list, &conn->chan_l);
627 }
628 
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
630 {
631 	mutex_lock(&conn->chan_lock);
632 	__l2cap_chan_add(conn, chan);
633 	mutex_unlock(&conn->chan_lock);
634 }
635 
void l2cap_chan_del(struct l2cap_chan *chan, int err)
637 {
638 	struct l2cap_conn *conn = chan->conn;
639 
640 	__clear_chan_timer(chan);
641 
642 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
643 	       state_to_string(chan->state));
644 
645 	chan->ops->teardown(chan, err);
646 
647 	if (conn) {
648 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
649 		/* Delete from channel list */
650 		list_del(&chan->list);
651 
652 		l2cap_chan_put(chan);
653 
654 		chan->conn = NULL;
655 
656 		/* Reference was only held for non-fixed channels or
657 		 * fixed channels that explicitly requested it using the
658 		 * FLAG_HOLD_HCI_CONN flag.
659 		 */
660 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
661 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
662 			hci_conn_drop(conn->hcon);
663 
664 		if (mgr && mgr->bredr_chan == chan)
665 			mgr->bredr_chan = NULL;
666 	}
667 
668 	if (chan->hs_hchan) {
669 		struct hci_chan *hs_hchan = chan->hs_hchan;
670 
671 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
672 		amp_disconnect_logical_link(hs_hchan);
673 	}
674 
675 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
676 		return;
677 
678 	switch (chan->mode) {
679 	case L2CAP_MODE_BASIC:
680 		break;
681 
682 	case L2CAP_MODE_LE_FLOWCTL:
683 	case L2CAP_MODE_EXT_FLOWCTL:
684 		skb_queue_purge(&chan->tx_q);
685 		break;
686 
687 	case L2CAP_MODE_ERTM:
688 		__clear_retrans_timer(chan);
689 		__clear_monitor_timer(chan);
690 		__clear_ack_timer(chan);
691 
692 		skb_queue_purge(&chan->srej_q);
693 
694 		l2cap_seq_list_free(&chan->srej_list);
695 		l2cap_seq_list_free(&chan->retrans_list);
696 		fallthrough;
697 
698 	case L2CAP_MODE_STREAMING:
699 		skb_queue_purge(&chan->tx_q);
700 		break;
701 	}
702 }
703 EXPORT_SYMBOL_GPL(l2cap_chan_del);
704 
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
706 			      void *data)
707 {
708 	struct l2cap_chan *chan;
709 
710 	list_for_each_entry(chan, &conn->chan_l, list) {
711 		func(chan, data);
712 	}
713 }
714 
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
716 		     void *data)
717 {
718 	if (!conn)
719 		return;
720 
721 	mutex_lock(&conn->chan_lock);
722 	__l2cap_chan_list(conn, func, data);
723 	mutex_unlock(&conn->chan_lock);
724 }
725 
726 EXPORT_SYMBOL_GPL(l2cap_chan_list);
727 
static void l2cap_conn_update_id_addr(struct work_struct *work)
729 {
730 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
731 					       id_addr_update_work);
732 	struct hci_conn *hcon = conn->hcon;
733 	struct l2cap_chan *chan;
734 
735 	mutex_lock(&conn->chan_lock);
736 
737 	list_for_each_entry(chan, &conn->chan_l, list) {
738 		l2cap_chan_lock(chan);
739 		bacpy(&chan->dst, &hcon->dst);
740 		chan->dst_type = bdaddr_dst_type(hcon);
741 		l2cap_chan_unlock(chan);
742 	}
743 
744 	mutex_unlock(&conn->chan_lock);
745 }
746 
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
748 {
749 	struct l2cap_conn *conn = chan->conn;
750 	struct l2cap_le_conn_rsp rsp;
751 	u16 result;
752 
753 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
754 		result = L2CAP_CR_LE_AUTHORIZATION;
755 	else
756 		result = L2CAP_CR_LE_BAD_PSM;
757 
758 	l2cap_state_change(chan, BT_DISCONN);
759 
760 	rsp.dcid    = cpu_to_le16(chan->scid);
761 	rsp.mtu     = cpu_to_le16(chan->imtu);
762 	rsp.mps     = cpu_to_le16(chan->mps);
763 	rsp.credits = cpu_to_le16(chan->rx_credits);
764 	rsp.result  = cpu_to_le16(result);
765 
766 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
767 		       &rsp);
768 }
769 
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
771 {
772 	struct l2cap_conn *conn = chan->conn;
773 	struct l2cap_ecred_conn_rsp rsp;
774 	u16 result;
775 
776 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
777 		result = L2CAP_CR_LE_AUTHORIZATION;
778 	else
779 		result = L2CAP_CR_LE_BAD_PSM;
780 
781 	l2cap_state_change(chan, BT_DISCONN);
782 
783 	memset(&rsp, 0, sizeof(rsp));
784 
785 	rsp.result  = cpu_to_le16(result);
786 
787 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
788 		       &rsp);
789 }
790 
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
792 {
793 	struct l2cap_conn *conn = chan->conn;
794 	struct l2cap_conn_rsp rsp;
795 	u16 result;
796 
797 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
798 		result = L2CAP_CR_SEC_BLOCK;
799 	else
800 		result = L2CAP_CR_BAD_PSM;
801 
802 	l2cap_state_change(chan, BT_DISCONN);
803 
804 	rsp.scid   = cpu_to_le16(chan->dcid);
805 	rsp.dcid   = cpu_to_le16(chan->scid);
806 	rsp.result = cpu_to_le16(result);
807 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
808 
809 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
810 }
811 
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
813 {
814 	struct l2cap_conn *conn = chan->conn;
815 
816 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
817 
818 	switch (chan->state) {
819 	case BT_LISTEN:
820 		chan->ops->teardown(chan, 0);
821 		break;
822 
823 	case BT_CONNECTED:
824 	case BT_CONFIG:
825 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
826 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
827 			l2cap_send_disconn_req(chan, reason);
828 		} else
829 			l2cap_chan_del(chan, reason);
830 		break;
831 
832 	case BT_CONNECT2:
833 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
834 			if (conn->hcon->type == ACL_LINK)
835 				l2cap_chan_connect_reject(chan);
836 			else if (conn->hcon->type == LE_LINK) {
837 				switch (chan->mode) {
838 				case L2CAP_MODE_LE_FLOWCTL:
839 					l2cap_chan_le_connect_reject(chan);
840 					break;
841 				case L2CAP_MODE_EXT_FLOWCTL:
842 					l2cap_chan_ecred_connect_reject(chan);
843 					break;
844 				}
845 			}
846 		}
847 
848 		l2cap_chan_del(chan, reason);
849 		break;
850 
851 	case BT_CONNECT:
852 	case BT_DISCONN:
853 		l2cap_chan_del(chan, reason);
854 		break;
855 
856 	default:
857 		chan->ops->teardown(chan, 0);
858 		break;
859 	}
860 }
861 EXPORT_SYMBOL(l2cap_chan_close);
862 
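/* Map the channel type and security level to the HCI authentication
 * requirement used when requesting BR/EDR security.
 */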
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
864 {
865 	switch (chan->chan_type) {
866 	case L2CAP_CHAN_RAW:
867 		switch (chan->sec_level) {
868 		case BT_SECURITY_HIGH:
869 		case BT_SECURITY_FIPS:
870 			return HCI_AT_DEDICATED_BONDING_MITM;
871 		case BT_SECURITY_MEDIUM:
872 			return HCI_AT_DEDICATED_BONDING;
873 		default:
874 			return HCI_AT_NO_BONDING;
875 		}
876 		break;
877 	case L2CAP_CHAN_CONN_LESS:
878 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
879 			if (chan->sec_level == BT_SECURITY_LOW)
880 				chan->sec_level = BT_SECURITY_SDP;
881 		}
882 		if (chan->sec_level == BT_SECURITY_HIGH ||
883 		    chan->sec_level == BT_SECURITY_FIPS)
884 			return HCI_AT_NO_BONDING_MITM;
885 		else
886 			return HCI_AT_NO_BONDING;
887 		break;
888 	case L2CAP_CHAN_CONN_ORIENTED:
889 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
890 			if (chan->sec_level == BT_SECURITY_LOW)
891 				chan->sec_level = BT_SECURITY_SDP;
892 
893 			if (chan->sec_level == BT_SECURITY_HIGH ||
894 			    chan->sec_level == BT_SECURITY_FIPS)
895 				return HCI_AT_NO_BONDING_MITM;
896 			else
897 				return HCI_AT_NO_BONDING;
898 		}
899 		fallthrough;
900 
901 	default:
902 		switch (chan->sec_level) {
903 		case BT_SECURITY_HIGH:
904 		case BT_SECURITY_FIPS:
905 			return HCI_AT_GENERAL_BONDING_MITM;
906 		case BT_SECURITY_MEDIUM:
907 			return HCI_AT_GENERAL_BONDING;
908 		default:
909 			return HCI_AT_NO_BONDING;
910 		}
911 		break;
912 	}
913 }
914 
915 /* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
917 {
918 	struct l2cap_conn *conn = chan->conn;
919 	__u8 auth_type;
920 
921 	if (conn->hcon->type == LE_LINK)
922 		return smp_conn_security(conn->hcon, chan->sec_level);
923 
924 	auth_type = l2cap_get_auth_type(chan);
925 
926 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
927 				 initiator);
928 }
929 
static u8 l2cap_get_ident(struct l2cap_conn *conn)
931 {
932 	u8 id;
933 
	/* Get next available identifier.
	 *    1 - 128 are used by the kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */
939 
940 	mutex_lock(&conn->ident_lock);
941 
942 	if (++conn->tx_ident > 128)
943 		conn->tx_ident = 1;
944 
945 	id = conn->tx_ident;
946 
947 	mutex_unlock(&conn->ident_lock);
948 
949 	return id;
950 }
951 
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
953 			   void *data)
954 {
955 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
956 	u8 flags;
957 
958 	BT_DBG("code 0x%2.2x", code);
959 
960 	if (!skb)
961 		return;
962 
963 	/* Use NO_FLUSH if supported or we have an LE link (which does
964 	 * not support auto-flushing packets) */
965 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
966 	    conn->hcon->type == LE_LINK)
967 		flags = ACL_START_NO_FLUSH;
968 	else
969 		flags = ACL_START;
970 
971 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
972 	skb->priority = HCI_PRIO_MAX;
973 
974 	hci_send_acl(conn->hchan, skb, flags);
975 }
976 
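/* A channel counts as "moving" while an AMP channel move is in progress,
 * except in the initial WAIT_PREPARE phase.
 */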
static bool __chan_is_moving(struct l2cap_chan *chan)
978 {
979 	return chan->move_state != L2CAP_MOVE_STABLE &&
980 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
981 }
982 
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
984 {
985 	struct hci_conn *hcon = chan->conn->hcon;
986 	u16 flags;
987 
988 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
989 	       skb->priority);
990 
991 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
992 		if (chan->hs_hchan)
993 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
994 		else
995 			kfree_skb(skb);
996 
997 		return;
998 	}
999 
1000 	/* Use NO_FLUSH for LE links (where this is the only option) or
1001 	 * if the BR/EDR link supports it and flushing has not been
1002 	 * explicitly requested (through FLAG_FLUSHABLE).
1003 	 */
1004 	if (hcon->type == LE_LINK ||
1005 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1006 	     lmp_no_flush_capable(hcon->hdev)))
1007 		flags = ACL_START_NO_FLUSH;
1008 	else
1009 		flags = ACL_START;
1010 
1011 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1012 	hci_send_acl(chan->conn->hchan, skb, flags);
1013 }
1014 
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1016 {
1017 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1018 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1019 
1020 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1021 		/* S-Frame */
1022 		control->sframe = 1;
1023 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1024 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1025 
1026 		control->sar = 0;
1027 		control->txseq = 0;
1028 	} else {
1029 		/* I-Frame */
1030 		control->sframe = 0;
1031 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1032 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1033 
1034 		control->poll = 0;
1035 		control->super = 0;
1036 	}
1037 }
1038 
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1040 {
1041 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1042 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1043 
1044 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1045 		/* S-Frame */
1046 		control->sframe = 1;
1047 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1048 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1049 
1050 		control->sar = 0;
1051 		control->txseq = 0;
1052 	} else {
1053 		/* I-Frame */
1054 		control->sframe = 0;
1055 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1056 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1057 
1058 		control->poll = 0;
1059 		control->super = 0;
1060 	}
1061 }
1062 
static inline void __unpack_control(struct l2cap_chan *chan,
1064 				    struct sk_buff *skb)
1065 {
1066 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1067 		__unpack_extended_control(get_unaligned_le32(skb->data),
1068 					  &bt_cb(skb)->l2cap);
1069 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1070 	} else {
1071 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1072 					  &bt_cb(skb)->l2cap);
1073 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1074 	}
1075 }
1076 
static u32 __pack_extended_control(struct l2cap_ctrl *control)
1078 {
1079 	u32 packed;
1080 
1081 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1082 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1083 
1084 	if (control->sframe) {
1085 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1086 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1087 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1088 	} else {
1089 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1090 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1091 	}
1092 
1093 	return packed;
1094 }
1095 
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1097 {
1098 	u16 packed;
1099 
1100 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1101 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1102 
1103 	if (control->sframe) {
1104 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1105 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1106 		packed |= L2CAP_CTRL_FRAME_TYPE;
1107 	} else {
1108 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1109 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1110 	}
1111 
1112 	return packed;
1113 }
1114 
static inline void __pack_control(struct l2cap_chan *chan,
1116 				  struct l2cap_ctrl *control,
1117 				  struct sk_buff *skb)
1118 {
1119 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1120 		put_unaligned_le32(__pack_extended_control(control),
1121 				   skb->data + L2CAP_HDR_SIZE);
1122 	} else {
1123 		put_unaligned_le16(__pack_enhanced_control(control),
1124 				   skb->data + L2CAP_HDR_SIZE);
1125 	}
1126 }
1127 
static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1129 {
1130 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1131 		return L2CAP_EXT_HDR_SIZE;
1132 	else
1133 		return L2CAP_ENH_HDR_SIZE;
1134 }
1135 
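/* Build an S-frame PDU carrying the given (already packed) control field,
 * appending an FCS when CRC16 is in use on the channel.
 */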
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1137 					       u32 control)
1138 {
1139 	struct sk_buff *skb;
1140 	struct l2cap_hdr *lh;
1141 	int hlen = __ertm_hdr_size(chan);
1142 
1143 	if (chan->fcs == L2CAP_FCS_CRC16)
1144 		hlen += L2CAP_FCS_SIZE;
1145 
1146 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1147 
1148 	if (!skb)
1149 		return ERR_PTR(-ENOMEM);
1150 
1151 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1152 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1153 	lh->cid = cpu_to_le16(chan->dcid);
1154 
1155 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1156 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1157 	else
1158 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1159 
1160 	if (chan->fcs == L2CAP_FCS_CRC16) {
1161 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1162 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1163 	}
1164 
1165 	skb->priority = HCI_PRIO_MAX;
1166 	return skb;
1167 }
1168 
static void l2cap_send_sframe(struct l2cap_chan *chan,
1170 			      struct l2cap_ctrl *control)
1171 {
1172 	struct sk_buff *skb;
1173 	u32 control_field;
1174 
1175 	BT_DBG("chan %p, control %p", chan, control);
1176 
1177 	if (!control->sframe)
1178 		return;
1179 
1180 	if (__chan_is_moving(chan))
1181 		return;
1182 
1183 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1184 	    !control->poll)
1185 		control->final = 1;
1186 
1187 	if (control->super == L2CAP_SUPER_RR)
1188 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1189 	else if (control->super == L2CAP_SUPER_RNR)
1190 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1191 
1192 	if (control->super != L2CAP_SUPER_SREJ) {
1193 		chan->last_acked_seq = control->reqseq;
1194 		__clear_ack_timer(chan);
1195 	}
1196 
1197 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1198 	       control->final, control->poll, control->super);
1199 
1200 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1201 		control_field = __pack_extended_control(control);
1202 	else
1203 		control_field = __pack_enhanced_control(control);
1204 
1205 	skb = l2cap_create_sframe_pdu(chan, control_field);
1206 	if (!IS_ERR(skb))
1207 		l2cap_do_send(chan, skb);
1208 }
1209 
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1211 {
1212 	struct l2cap_ctrl control;
1213 
1214 	BT_DBG("chan %p, poll %d", chan, poll);
1215 
1216 	memset(&control, 0, sizeof(control));
1217 	control.sframe = 1;
1218 	control.poll = poll;
1219 
1220 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1221 		control.super = L2CAP_SUPER_RNR;
1222 	else
1223 		control.super = L2CAP_SUPER_RR;
1224 
1225 	control.reqseq = chan->buffer_seq;
1226 	l2cap_send_sframe(chan, &control);
1227 }
1228 
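/* True when it is safe to send a new connect request: either the channel is
 * not connection-oriented or no connect request is currently pending.
 */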
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1230 {
1231 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1232 		return true;
1233 
1234 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1235 }
1236 
static bool __amp_capable(struct l2cap_chan *chan)
1238 {
1239 	struct l2cap_conn *conn = chan->conn;
1240 	struct hci_dev *hdev;
1241 	bool amp_available = false;
1242 
1243 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1244 		return false;
1245 
1246 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1247 		return false;
1248 
1249 	read_lock(&hci_dev_list_lock);
1250 	list_for_each_entry(hdev, &hci_dev_list, list) {
1251 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1252 		    test_bit(HCI_UP, &hdev->flags)) {
1253 			amp_available = true;
1254 			break;
1255 		}
1256 	}
1257 	read_unlock(&hci_dev_list_lock);
1258 
1259 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1260 		return amp_available;
1261 
1262 	return false;
1263 }
1264 
static bool l2cap_check_efs(struct l2cap_chan *chan)
1266 {
1267 	/* Check EFS parameters */
1268 	return true;
1269 }
1270 
void l2cap_send_conn_req(struct l2cap_chan *chan)
1272 {
1273 	struct l2cap_conn *conn = chan->conn;
1274 	struct l2cap_conn_req req;
1275 
1276 	req.scid = cpu_to_le16(chan->scid);
1277 	req.psm  = chan->psm;
1278 
1279 	chan->ident = l2cap_get_ident(conn);
1280 
1281 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1282 
1283 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1284 }
1285 
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1287 {
1288 	struct l2cap_create_chan_req req;
1289 	req.scid = cpu_to_le16(chan->scid);
1290 	req.psm  = chan->psm;
1291 	req.amp_id = amp_id;
1292 
1293 	chan->ident = l2cap_get_ident(chan->conn);
1294 
1295 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1296 		       sizeof(req), &req);
1297 }
1298 
static void l2cap_move_setup(struct l2cap_chan *chan)
1300 {
1301 	struct sk_buff *skb;
1302 
1303 	BT_DBG("chan %p", chan);
1304 
1305 	if (chan->mode != L2CAP_MODE_ERTM)
1306 		return;
1307 
1308 	__clear_retrans_timer(chan);
1309 	__clear_monitor_timer(chan);
1310 	__clear_ack_timer(chan);
1311 
1312 	chan->retry_count = 0;
1313 	skb_queue_walk(&chan->tx_q, skb) {
1314 		if (bt_cb(skb)->l2cap.retries)
1315 			bt_cb(skb)->l2cap.retries = 1;
1316 		else
1317 			break;
1318 	}
1319 
1320 	chan->expected_tx_seq = chan->buffer_seq;
1321 
1322 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1323 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1324 	l2cap_seq_list_clear(&chan->retrans_list);
1325 	l2cap_seq_list_clear(&chan->srej_list);
1326 	skb_queue_purge(&chan->srej_q);
1327 
1328 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1329 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1330 
1331 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1332 }
1333 
static void l2cap_move_done(struct l2cap_chan *chan)
1335 {
1336 	u8 move_role = chan->move_role;
1337 	BT_DBG("chan %p", chan);
1338 
1339 	chan->move_state = L2CAP_MOVE_STABLE;
1340 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1341 
1342 	if (chan->mode != L2CAP_MODE_ERTM)
1343 		return;
1344 
1345 	switch (move_role) {
1346 	case L2CAP_MOVE_ROLE_INITIATOR:
1347 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1348 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1349 		break;
1350 	case L2CAP_MOVE_ROLE_RESPONDER:
1351 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1352 		break;
1353 	}
1354 }
1355 
static void l2cap_chan_ready(struct l2cap_chan *chan)
1357 {
1358 	/* The channel may have already been flagged as connected in
1359 	 * case of receiving data before the L2CAP info req/rsp
1360 	 * procedure is complete.
1361 	 */
1362 	if (chan->state == BT_CONNECTED)
1363 		return;
1364 
1365 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1366 	chan->conf_state = 0;
1367 	__clear_chan_timer(chan);
1368 
1369 	switch (chan->mode) {
1370 	case L2CAP_MODE_LE_FLOWCTL:
1371 	case L2CAP_MODE_EXT_FLOWCTL:
1372 		if (!chan->tx_credits)
1373 			chan->ops->suspend(chan);
1374 		break;
1375 	}
1376 
1377 	chan->state = BT_CONNECTED;
1378 
1379 	chan->ops->ready(chan);
1380 }
1381 
static void l2cap_le_connect(struct l2cap_chan *chan)
1383 {
1384 	struct l2cap_conn *conn = chan->conn;
1385 	struct l2cap_le_conn_req req;
1386 
1387 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1388 		return;
1389 
1390 	if (!chan->imtu)
1391 		chan->imtu = chan->conn->mtu;
1392 
1393 	l2cap_le_flowctl_init(chan, 0);
1394 
1395 	req.psm     = chan->psm;
1396 	req.scid    = cpu_to_le16(chan->scid);
1397 	req.mtu     = cpu_to_le16(chan->imtu);
1398 	req.mps     = cpu_to_le16(chan->mps);
1399 	req.credits = cpu_to_le16(chan->rx_credits);
1400 
1401 	chan->ident = l2cap_get_ident(conn);
1402 
1403 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1404 		       sizeof(req), &req);
1405 }
1406 
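/* Scratch buffer for building an Enhanced Credit Based connection request
 * carrying up to five source CIDs, plus bookkeeping used while collecting
 * deferred channels that share the same PID and PSM.
 */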
1407 struct l2cap_ecred_conn_data {
1408 	struct {
1409 		struct l2cap_ecred_conn_req req;
1410 		__le16 scid[5];
1411 	} __packed pdu;
1412 	struct l2cap_chan *chan;
1413 	struct pid *pid;
1414 	int count;
1415 };
1416 
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1418 {
1419 	struct l2cap_ecred_conn_data *conn = data;
1420 	struct pid *pid;
1421 
1422 	if (chan == conn->chan)
1423 		return;
1424 
1425 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1426 		return;
1427 
1428 	pid = chan->ops->get_peer_pid(chan);
1429 
1430 	/* Only add deferred channels with the same PID/PSM */
1431 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1432 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1433 		return;
1434 
1435 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1436 		return;
1437 
1438 	l2cap_ecred_init(chan, 0);
1439 
1440 	/* Set the same ident so we can match on the rsp */
1441 	chan->ident = conn->chan->ident;
1442 
1443 	/* Include all channels deferred */
1444 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1445 
1446 	conn->count++;
1447 }
1448 
static void l2cap_ecred_connect(struct l2cap_chan *chan)
1450 {
1451 	struct l2cap_conn *conn = chan->conn;
1452 	struct l2cap_ecred_conn_data data;
1453 
1454 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1455 		return;
1456 
1457 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1458 		return;
1459 
1460 	l2cap_ecred_init(chan, 0);
1461 
1462 	memset(&data, 0, sizeof(data));
1463 	data.pdu.req.psm     = chan->psm;
1464 	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
1465 	data.pdu.req.mps     = cpu_to_le16(chan->mps);
1466 	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1467 	data.pdu.scid[0]     = cpu_to_le16(chan->scid);
1468 
1469 	chan->ident = l2cap_get_ident(conn);
1470 
1471 	data.count = 1;
1472 	data.chan = chan;
1473 	data.pid = chan->ops->get_peer_pid(chan);
1474 
1475 	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1476 
1477 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1478 		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
1479 		       &data.pdu);
1480 }
1481 
static void l2cap_le_start(struct l2cap_chan *chan)
1483 {
1484 	struct l2cap_conn *conn = chan->conn;
1485 
1486 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1487 		return;
1488 
1489 	if (!chan->psm) {
1490 		l2cap_chan_ready(chan);
1491 		return;
1492 	}
1493 
1494 	if (chan->state == BT_CONNECT) {
1495 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1496 			l2cap_ecred_connect(chan);
1497 		else
1498 			l2cap_le_connect(chan);
1499 	}
1500 }
1501 
static void l2cap_start_connection(struct l2cap_chan *chan)
1503 {
1504 	if (__amp_capable(chan)) {
1505 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1506 		a2mp_discover_amp(chan);
1507 	} else if (chan->conn->hcon->type == LE_LINK) {
1508 		l2cap_le_start(chan);
1509 	} else {
1510 		l2cap_send_conn_req(chan);
1511 	}
1512 }
1513 
static void l2cap_request_info(struct l2cap_conn *conn)
1515 {
1516 	struct l2cap_info_req req;
1517 
1518 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1519 		return;
1520 
1521 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1522 
1523 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1524 	conn->info_ident = l2cap_get_ident(conn);
1525 
1526 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1527 
1528 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1529 		       sizeof(req), &req);
1530 }
1531 
static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1533 {
1534 	/* The minimum encryption key size needs to be enforced by the
1535 	 * host stack before establishing any L2CAP connections. The
1536 	 * specification in theory allows a minimum of 1, but to align
1537 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1538 	 *
1539 	 * This check might also be called for unencrypted connections
1540 	 * that have no key size requirements. Ensure that the link is
1541 	 * actually encrypted before enforcing a key size.
1542 	 */
1543 	int min_key_size = hcon->hdev->min_enc_key_size;
1544 
1545 	/* On FIPS security level, key size must be 16 bytes */
1546 	if (hcon->sec_level == BT_SECURITY_FIPS)
1547 		min_key_size = 16;
1548 
1549 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1550 		hcon->enc_key_size >= min_key_size);
1551 }
1552 
static void l2cap_do_start(struct l2cap_chan *chan)
1554 {
1555 	struct l2cap_conn *conn = chan->conn;
1556 
1557 	if (conn->hcon->type == LE_LINK) {
1558 		l2cap_le_start(chan);
1559 		return;
1560 	}
1561 
1562 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1563 		l2cap_request_info(conn);
1564 		return;
1565 	}
1566 
1567 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1568 		return;
1569 
1570 	if (!l2cap_chan_check_security(chan, true) ||
1571 	    !__l2cap_no_conn_pending(chan))
1572 		return;
1573 
1574 	if (l2cap_check_enc_key_size(conn->hcon))
1575 		l2cap_start_connection(chan);
1576 	else
1577 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1578 }
1579 
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1581 {
1582 	u32 local_feat_mask = l2cap_feat_mask;
1583 	if (!disable_ertm)
1584 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1585 
1586 	switch (mode) {
1587 	case L2CAP_MODE_ERTM:
1588 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1589 	case L2CAP_MODE_STREAMING:
1590 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1591 	default:
1592 		return 0x00;
1593 	}
1594 }
1595 
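/* Send an L2CAP Disconnection request (or just change state for A2MP
 * channels) and move the channel to BT_DISCONN with the given error.
 */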
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1597 {
1598 	struct l2cap_conn *conn = chan->conn;
1599 	struct l2cap_disconn_req req;
1600 
1601 	if (!conn)
1602 		return;
1603 
1604 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1605 		__clear_retrans_timer(chan);
1606 		__clear_monitor_timer(chan);
1607 		__clear_ack_timer(chan);
1608 	}
1609 
1610 	if (chan->scid == L2CAP_CID_A2MP) {
1611 		l2cap_state_change(chan, BT_DISCONN);
1612 		return;
1613 	}
1614 
1615 	req.dcid = cpu_to_le16(chan->dcid);
1616 	req.scid = cpu_to_le16(chan->scid);
1617 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1618 		       sizeof(req), &req);
1619 
1620 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1621 }
1622 
1623 /* ---- L2CAP connections ---- */
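/* Walk all channels on a BR/EDR connection and progress their setup: start
 * pending outgoing connections (BT_CONNECT) and answer incoming ones
 * (BT_CONNECT2) that were waiting for security or authorization.
 */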
static void l2cap_conn_start(struct l2cap_conn *conn)
1625 {
1626 	struct l2cap_chan *chan, *tmp;
1627 
1628 	BT_DBG("conn %p", conn);
1629 
1630 	mutex_lock(&conn->chan_lock);
1631 
1632 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1633 		l2cap_chan_lock(chan);
1634 
1635 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1636 			l2cap_chan_ready(chan);
1637 			l2cap_chan_unlock(chan);
1638 			continue;
1639 		}
1640 
1641 		if (chan->state == BT_CONNECT) {
1642 			if (!l2cap_chan_check_security(chan, true) ||
1643 			    !__l2cap_no_conn_pending(chan)) {
1644 				l2cap_chan_unlock(chan);
1645 				continue;
1646 			}
1647 
1648 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1649 			    && test_bit(CONF_STATE2_DEVICE,
1650 					&chan->conf_state)) {
1651 				l2cap_chan_close(chan, ECONNRESET);
1652 				l2cap_chan_unlock(chan);
1653 				continue;
1654 			}
1655 
1656 			if (l2cap_check_enc_key_size(conn->hcon))
1657 				l2cap_start_connection(chan);
1658 			else
1659 				l2cap_chan_close(chan, ECONNREFUSED);
1660 
1661 		} else if (chan->state == BT_CONNECT2) {
1662 			struct l2cap_conn_rsp rsp;
1663 			char buf[128];
1664 			rsp.scid = cpu_to_le16(chan->dcid);
1665 			rsp.dcid = cpu_to_le16(chan->scid);
1666 
1667 			if (l2cap_chan_check_security(chan, false)) {
1668 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1669 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1670 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1671 					chan->ops->defer(chan);
1672 
1673 				} else {
1674 					l2cap_state_change(chan, BT_CONFIG);
1675 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1676 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1677 				}
1678 			} else {
1679 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1680 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1681 			}
1682 
1683 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1684 				       sizeof(rsp), &rsp);
1685 
1686 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1687 			    rsp.result != L2CAP_CR_SUCCESS) {
1688 				l2cap_chan_unlock(chan);
1689 				continue;
1690 			}
1691 
1692 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1693 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1694 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1695 			chan->num_conf_req++;
1696 		}
1697 
1698 		l2cap_chan_unlock(chan);
1699 	}
1700 
1701 	mutex_unlock(&conn->chan_lock);
1702 }
1703 
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1705 {
1706 	struct hci_conn *hcon = conn->hcon;
1707 	struct hci_dev *hdev = hcon->hdev;
1708 
1709 	BT_DBG("%s conn %p", hdev->name, conn);
1710 
1711 	/* For outgoing pairing which doesn't necessarily have an
1712 	 * associated socket (e.g. mgmt_pair_device).
1713 	 */
1714 	if (hcon->out)
1715 		smp_conn_security(hcon, hcon->pending_sec_level);
1716 
1717 	/* For LE peripheral connections, make sure the connection interval
1718 	 * is in the range of the minimum and maximum interval that has
1719 	 * been configured for this connection. If not, then trigger
1720 	 * the connection update procedure.
1721 	 */
1722 	if (hcon->role == HCI_ROLE_SLAVE &&
1723 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1724 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1725 		struct l2cap_conn_param_update_req req;
1726 
1727 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1728 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1729 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1730 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1731 
1732 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1733 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1734 	}
1735 }
1736 
static void l2cap_conn_ready(struct l2cap_conn *conn)
1738 {
1739 	struct l2cap_chan *chan;
1740 	struct hci_conn *hcon = conn->hcon;
1741 
1742 	BT_DBG("conn %p", conn);
1743 
1744 	if (hcon->type == ACL_LINK)
1745 		l2cap_request_info(conn);
1746 
1747 	mutex_lock(&conn->chan_lock);
1748 
1749 	list_for_each_entry(chan, &conn->chan_l, list) {
1750 
1751 		l2cap_chan_lock(chan);
1752 
1753 		if (chan->scid == L2CAP_CID_A2MP) {
1754 			l2cap_chan_unlock(chan);
1755 			continue;
1756 		}
1757 
1758 		if (hcon->type == LE_LINK) {
1759 			l2cap_le_start(chan);
1760 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1761 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1762 				l2cap_chan_ready(chan);
1763 		} else if (chan->state == BT_CONNECT) {
1764 			l2cap_do_start(chan);
1765 		}
1766 
1767 		l2cap_chan_unlock(chan);
1768 	}
1769 
1770 	mutex_unlock(&conn->chan_lock);
1771 
1772 	if (hcon->type == LE_LINK)
1773 		l2cap_le_conn_ready(conn);
1774 
1775 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1776 }
1777 
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1780 {
1781 	struct l2cap_chan *chan;
1782 
1783 	BT_DBG("conn %p", conn);
1784 
1785 	mutex_lock(&conn->chan_lock);
1786 
1787 	list_for_each_entry(chan, &conn->chan_l, list) {
1788 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1789 			l2cap_chan_set_err(chan, err);
1790 	}
1791 
1792 	mutex_unlock(&conn->chan_lock);
1793 }
1794 
static void l2cap_info_timeout(struct work_struct *work)
1796 {
1797 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1798 					       info_timer.work);
1799 
1800 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1801 	conn->info_ident = 0;
1802 
1803 	l2cap_conn_start(conn);
1804 }
1805 
1806 /*
1807  * l2cap_user
1808  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1809  * callback is called during registration. The ->remove callback is called
1810  * during unregistration.
1811  * An l2cap_user object is unregistered either explicitly or implicitly when
1812  * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1813  * l2cap->hchan, etc. are valid as long as the remove callback hasn't been called.
1814  * External modules must own a reference to the l2cap_conn object if they intend
1815  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1816  * any time if they don't.
1817  */
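/* A minimal registration sketch (the names below are hypothetical and not part
 * of this file); it assumes the caller already holds a valid l2cap_conn
 * reference:
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	INIT_LIST_HEAD(&my_user.list);
 *	err = l2cap_register_user(conn, &my_user);
 *
 * On success the user stays registered until l2cap_unregister_user() or
 * connection teardown invokes ->remove.
 */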
1818 
1819 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1820 {
1821 	struct hci_dev *hdev = conn->hcon->hdev;
1822 	int ret;
1823 
1824 	/* We need to check whether l2cap_conn is registered. If it is not, we
1825 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1826 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1827 	 * relies on the parent hci_conn object to be locked. This itself relies
1828 	 * on the hci_dev object to be locked. So we must lock the hci device
1829 	 * here, too. */
1830 
1831 	hci_dev_lock(hdev);
1832 
1833 	if (!list_empty(&user->list)) {
1834 		ret = -EINVAL;
1835 		goto out_unlock;
1836 	}
1837 
1838 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1839 	if (!conn->hchan) {
1840 		ret = -ENODEV;
1841 		goto out_unlock;
1842 	}
1843 
1844 	ret = user->probe(conn, user);
1845 	if (ret)
1846 		goto out_unlock;
1847 
1848 	list_add(&user->list, &conn->users);
1849 	ret = 0;
1850 
1851 out_unlock:
1852 	hci_dev_unlock(hdev);
1853 	return ret;
1854 }
1855 EXPORT_SYMBOL(l2cap_register_user);
1856 
1857 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1858 {
1859 	struct hci_dev *hdev = conn->hcon->hdev;
1860 
1861 	hci_dev_lock(hdev);
1862 
1863 	if (list_empty(&user->list))
1864 		goto out_unlock;
1865 
1866 	list_del_init(&user->list);
1867 	user->remove(conn, user);
1868 
1869 out_unlock:
1870 	hci_dev_unlock(hdev);
1871 }
1872 EXPORT_SYMBOL(l2cap_unregister_user);
1873 
1874 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1875 {
1876 	struct l2cap_user *user;
1877 
1878 	while (!list_empty(&conn->users)) {
1879 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1880 		list_del_init(&user->list);
1881 		user->remove(conn, user);
1882 	}
1883 }
1884 
1885 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1886 {
1887 	struct l2cap_conn *conn = hcon->l2cap_data;
1888 	struct l2cap_chan *chan, *l;
1889 
1890 	if (!conn)
1891 		return;
1892 
1893 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1894 
1895 	kfree_skb(conn->rx_skb);
1896 
1897 	skb_queue_purge(&conn->pending_rx);
1898 
1899 	/* We cannot call flush_work(&conn->pending_rx_work) here since we
1900 	 * might block if we are running on a worker from the same workqueue
1901 	 * pending_rx_work is waiting on.
1902 	 */
1903 	if (work_pending(&conn->pending_rx_work))
1904 		cancel_work_sync(&conn->pending_rx_work);
1905 
1906 	if (work_pending(&conn->id_addr_update_work))
1907 		cancel_work_sync(&conn->id_addr_update_work);
1908 
1909 	l2cap_unregister_all_users(conn);
1910 
1911 	/* Force the connection to be immediately dropped */
1912 	hcon->disc_timeout = 0;
1913 
1914 	mutex_lock(&conn->chan_lock);
1915 
1916 	/* Kill channels */
1917 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1918 		l2cap_chan_hold(chan);
1919 		l2cap_chan_lock(chan);
1920 
1921 		l2cap_chan_del(chan, err);
1922 
1923 		chan->ops->close(chan);
1924 
1925 		l2cap_chan_unlock(chan);
1926 		l2cap_chan_put(chan);
1927 	}
1928 
1929 	mutex_unlock(&conn->chan_lock);
1930 
1931 	hci_chan_del(conn->hchan);
1932 
1933 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1934 		cancel_delayed_work_sync(&conn->info_timer);
1935 
1936 	hcon->l2cap_data = NULL;
1937 	conn->hchan = NULL;
1938 	l2cap_conn_put(conn);
1939 }
1940 
1941 static void l2cap_conn_free(struct kref *ref)
1942 {
1943 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1944 
1945 	hci_conn_put(conn->hcon);
1946 	kfree(conn);
1947 }
1948 
1949 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1950 {
1951 	kref_get(&conn->ref);
1952 	return conn;
1953 }
1954 EXPORT_SYMBOL(l2cap_conn_get);
1955 
1956 void l2cap_conn_put(struct l2cap_conn *conn)
1957 {
1958 	kref_put(&conn->ref, l2cap_conn_free);
1959 }
1960 EXPORT_SYMBOL(l2cap_conn_put);
1961 
1962 /* ---- Socket interface ---- */
1963 
1964 /* Find socket with psm and source / destination bdaddr.
1965  * Returns closest match.
1966  */
1967 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1968 						   bdaddr_t *src,
1969 						   bdaddr_t *dst,
1970 						   u8 link_type)
1971 {
1972 	struct l2cap_chan *c, *tmp, *c1 = NULL;
1973 
1974 	read_lock(&chan_list_lock);
1975 
1976 	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
1977 		if (state && c->state != state)
1978 			continue;
1979 
1980 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1981 			continue;
1982 
1983 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1984 			continue;
1985 
1986 		if (c->psm == psm) {
1987 			int src_match, dst_match;
1988 			int src_any, dst_any;
1989 
1990 			/* Exact match. */
1991 			src_match = !bacmp(&c->src, src);
1992 			dst_match = !bacmp(&c->dst, dst);
1993 			if (src_match && dst_match) {
1994 				if (!l2cap_chan_hold_unless_zero(c))
1995 					continue;
1996 
1997 				read_unlock(&chan_list_lock);
1998 				return c;
1999 			}
2000 
2001 			/* Closest match */
2002 			src_any = !bacmp(&c->src, BDADDR_ANY);
2003 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
2004 			if ((src_match && dst_any) || (src_any && dst_match) ||
2005 			    (src_any && dst_any))
2006 				c1 = c;
2007 		}
2008 	}
2009 
2010 	if (c1)
2011 		c1 = l2cap_chan_hold_unless_zero(c1);
2012 
2013 	read_unlock(&chan_list_lock);
2014 
2015 	return c1;
2016 }
2017 
2018 static void l2cap_monitor_timeout(struct work_struct *work)
2019 {
2020 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2021 					       monitor_timer.work);
2022 
2023 	BT_DBG("chan %p", chan);
2024 
2025 	l2cap_chan_lock(chan);
2026 
2027 	if (!chan->conn) {
2028 		l2cap_chan_unlock(chan);
2029 		l2cap_chan_put(chan);
2030 		return;
2031 	}
2032 
2033 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2034 
2035 	l2cap_chan_unlock(chan);
2036 	l2cap_chan_put(chan);
2037 }
2038 
2039 static void l2cap_retrans_timeout(struct work_struct *work)
2040 {
2041 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2042 					       retrans_timer.work);
2043 
2044 	BT_DBG("chan %p", chan);
2045 
2046 	l2cap_chan_lock(chan);
2047 
2048 	if (!chan->conn) {
2049 		l2cap_chan_unlock(chan);
2050 		l2cap_chan_put(chan);
2051 		return;
2052 	}
2053 
2054 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2055 	l2cap_chan_unlock(chan);
2056 	l2cap_chan_put(chan);
2057 }
2058 
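/* Transmit queued SDU fragments as Streaming mode I-frames: each frame is
 * given the next tx sequence number, gets an FCS appended when CRC16 is in
 * use, and is sent immediately; no copy is kept for retransmission.
 */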
2059 static void l2cap_streaming_send(struct l2cap_chan *chan,
2060 				 struct sk_buff_head *skbs)
2061 {
2062 	struct sk_buff *skb;
2063 	struct l2cap_ctrl *control;
2064 
2065 	BT_DBG("chan %p, skbs %p", chan, skbs);
2066 
2067 	if (__chan_is_moving(chan))
2068 		return;
2069 
2070 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
2071 
2072 	while (!skb_queue_empty(&chan->tx_q)) {
2073 
2074 		skb = skb_dequeue(&chan->tx_q);
2075 
2076 		bt_cb(skb)->l2cap.retries = 1;
2077 		control = &bt_cb(skb)->l2cap;
2078 
2079 		control->reqseq = 0;
2080 		control->txseq = chan->next_tx_seq;
2081 
2082 		__pack_control(chan, control, skb);
2083 
2084 		if (chan->fcs == L2CAP_FCS_CRC16) {
2085 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2086 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2087 		}
2088 
2089 		l2cap_do_send(chan, skb);
2090 
2091 		BT_DBG("Sent txseq %u", control->txseq);
2092 
2093 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2094 		chan->frames_sent++;
2095 	}
2096 }
2097 
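/* Send new I-frames from the tx queue while the remote window has room and
 * the tx state machine is in XMIT.  The queued skb is retained for possible
 * retransmission; only a clone is handed to the controller, and the
 * retransmission timer is (re)armed for each frame.  Returns the number of
 * frames sent.
 */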
2098 static int l2cap_ertm_send(struct l2cap_chan *chan)
2099 {
2100 	struct sk_buff *skb, *tx_skb;
2101 	struct l2cap_ctrl *control;
2102 	int sent = 0;
2103 
2104 	BT_DBG("chan %p", chan);
2105 
2106 	if (chan->state != BT_CONNECTED)
2107 		return -ENOTCONN;
2108 
2109 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2110 		return 0;
2111 
2112 	if (__chan_is_moving(chan))
2113 		return 0;
2114 
2115 	while (chan->tx_send_head &&
2116 	       chan->unacked_frames < chan->remote_tx_win &&
2117 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
2118 
2119 		skb = chan->tx_send_head;
2120 
2121 		bt_cb(skb)->l2cap.retries = 1;
2122 		control = &bt_cb(skb)->l2cap;
2123 
2124 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2125 			control->final = 1;
2126 
2127 		control->reqseq = chan->buffer_seq;
2128 		chan->last_acked_seq = chan->buffer_seq;
2129 		control->txseq = chan->next_tx_seq;
2130 
2131 		__pack_control(chan, control, skb);
2132 
2133 		if (chan->fcs == L2CAP_FCS_CRC16) {
2134 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2135 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2136 		}
2137 
2138 		/* Clone after data has been modified. Data is assumed to be
2139 		 * read-only (for locking purposes) on cloned sk_buffs.
2140 		 */
2141 		tx_skb = skb_clone(skb, GFP_KERNEL);
2142 
2143 		if (!tx_skb)
2144 			break;
2145 
2146 		__set_retrans_timer(chan);
2147 
2148 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2149 		chan->unacked_frames++;
2150 		chan->frames_sent++;
2151 		sent++;
2152 
2153 		if (skb_queue_is_last(&chan->tx_q, skb))
2154 			chan->tx_send_head = NULL;
2155 		else
2156 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2157 
2158 		l2cap_do_send(chan, tx_skb);
2159 		BT_DBG("Sent txseq %u", control->txseq);
2160 	}
2161 
2162 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2163 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
2164 
2165 	return sent;
2166 }
2167 
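/* Retransmit every sequence number currently queued on retrans_list.  If a
 * frame exceeds the negotiated max_tx retry limit, a Disconnection Request
 * is sent and the remaining list is dropped.
 */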
2168 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2169 {
2170 	struct l2cap_ctrl control;
2171 	struct sk_buff *skb;
2172 	struct sk_buff *tx_skb;
2173 	u16 seq;
2174 
2175 	BT_DBG("chan %p", chan);
2176 
2177 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2178 		return;
2179 
2180 	if (__chan_is_moving(chan))
2181 		return;
2182 
2183 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2184 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2185 
2186 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2187 		if (!skb) {
2188 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2189 			       seq);
2190 			continue;
2191 		}
2192 
2193 		bt_cb(skb)->l2cap.retries++;
2194 		control = bt_cb(skb)->l2cap;
2195 
2196 		if (chan->max_tx != 0 &&
2197 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
2198 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2199 			l2cap_send_disconn_req(chan, ECONNRESET);
2200 			l2cap_seq_list_clear(&chan->retrans_list);
2201 			break;
2202 		}
2203 
2204 		control.reqseq = chan->buffer_seq;
2205 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2206 			control.final = 1;
2207 		else
2208 			control.final = 0;
2209 
2210 		if (skb_cloned(skb)) {
2211 			/* Cloned sk_buffs are read-only, so we need a
2212 			 * writeable copy
2213 			 */
2214 			tx_skb = skb_copy(skb, GFP_KERNEL);
2215 		} else {
2216 			tx_skb = skb_clone(skb, GFP_KERNEL);
2217 		}
2218 
2219 		if (!tx_skb) {
2220 			l2cap_seq_list_clear(&chan->retrans_list);
2221 			break;
2222 		}
2223 
2224 		/* Update skb contents */
2225 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2226 			put_unaligned_le32(__pack_extended_control(&control),
2227 					   tx_skb->data + L2CAP_HDR_SIZE);
2228 		} else {
2229 			put_unaligned_le16(__pack_enhanced_control(&control),
2230 					   tx_skb->data + L2CAP_HDR_SIZE);
2231 		}
2232 
2233 		/* Update FCS */
2234 		if (chan->fcs == L2CAP_FCS_CRC16) {
2235 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2236 					tx_skb->len - L2CAP_FCS_SIZE);
2237 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2238 						L2CAP_FCS_SIZE);
2239 		}
2240 
2241 		l2cap_do_send(chan, tx_skb);
2242 
2243 		BT_DBG("Resent txseq %d", control.txseq);
2244 
2245 		chan->last_acked_seq = chan->buffer_seq;
2246 	}
2247 }
2248 
2249 static void l2cap_retransmit(struct l2cap_chan *chan,
2250 			     struct l2cap_ctrl *control)
2251 {
2252 	BT_DBG("chan %p, control %p", chan, control);
2253 
2254 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2255 	l2cap_ertm_resend(chan);
2256 }
2257 
2258 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2259 				 struct l2cap_ctrl *control)
2260 {
2261 	struct sk_buff *skb;
2262 
2263 	BT_DBG("chan %p, control %p", chan, control);
2264 
2265 	if (control->poll)
2266 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2267 
2268 	l2cap_seq_list_clear(&chan->retrans_list);
2269 
2270 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2271 		return;
2272 
2273 	if (chan->unacked_frames) {
2274 		skb_queue_walk(&chan->tx_q, skb) {
2275 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2276 			    skb == chan->tx_send_head)
2277 				break;
2278 		}
2279 
2280 		skb_queue_walk_from(&chan->tx_q, skb) {
2281 			if (skb == chan->tx_send_head)
2282 				break;
2283 
2284 			l2cap_seq_list_append(&chan->retrans_list,
2285 					      bt_cb(skb)->l2cap.txseq);
2286 		}
2287 
2288 		l2cap_ertm_resend(chan);
2289 	}
2290 }
2291 
2292 static void l2cap_send_ack(struct l2cap_chan *chan)
2293 {
2294 	struct l2cap_ctrl control;
2295 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2296 					 chan->last_acked_seq);
2297 	int threshold;
2298 
2299 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2300 	       chan, chan->last_acked_seq, chan->buffer_seq);
2301 
2302 	memset(&control, 0, sizeof(control));
2303 	control.sframe = 1;
2304 
2305 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2306 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2307 		__clear_ack_timer(chan);
2308 		control.super = L2CAP_SUPER_RNR;
2309 		control.reqseq = chan->buffer_seq;
2310 		l2cap_send_sframe(chan, &control);
2311 	} else {
2312 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2313 			l2cap_ertm_send(chan);
2314 			/* If any i-frames were sent, they included an ack */
2315 			if (chan->buffer_seq == chan->last_acked_seq)
2316 				frames_to_ack = 0;
2317 		}
2318 
2319 		/* Ack now if the window is 3/4ths full.
2320 		 * Calculate without mul or div
2321 		 */
2322 		threshold = chan->ack_win;
2323 		threshold += threshold << 1;
2324 		threshold >>= 2;
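		/* e.g. ack_win = 63: 63 + (63 << 1) = 189, 189 >> 2 = 47,
		 * i.e. floor(3 * 63 / 4).
		 */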
2325 
2326 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2327 		       threshold);
2328 
2329 		if (frames_to_ack >= threshold) {
2330 			__clear_ack_timer(chan);
2331 			control.super = L2CAP_SUPER_RR;
2332 			control.reqseq = chan->buffer_seq;
2333 			l2cap_send_sframe(chan, &control);
2334 			frames_to_ack = 0;
2335 		}
2336 
2337 		if (frames_to_ack)
2338 			__set_ack_timer(chan);
2339 	}
2340 }
2341 
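/* Copy 'count' bytes of the message into the already allocated skb, then
 * spill any remaining 'len' bytes into continuation skbs (no L2CAP header)
 * chained on the frag_list, each at most conn->mtu bytes.  Returns the
 * number of bytes consumed or a negative error.
 */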
2342 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2343 					 struct msghdr *msg, int len,
2344 					 int count, struct sk_buff *skb)
2345 {
2346 	struct l2cap_conn *conn = chan->conn;
2347 	struct sk_buff **frag;
2348 	int sent = 0;
2349 
2350 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2351 		return -EFAULT;
2352 
2353 	sent += count;
2354 	len  -= count;
2355 
2356 	/* Continuation fragments (no L2CAP header) */
2357 	frag = &skb_shinfo(skb)->frag_list;
2358 	while (len) {
2359 		struct sk_buff *tmp;
2360 
2361 		count = min_t(unsigned int, conn->mtu, len);
2362 
2363 		tmp = chan->ops->alloc_skb(chan, 0, count,
2364 					   msg->msg_flags & MSG_DONTWAIT);
2365 		if (IS_ERR(tmp))
2366 			return PTR_ERR(tmp);
2367 
2368 		*frag = tmp;
2369 
2370 		if (!copy_from_iter_full(skb_put(*frag, count), count,
2371 				   &msg->msg_iter))
2372 			return -EFAULT;
2373 
2374 		sent += count;
2375 		len  -= count;
2376 
2377 		skb->len += (*frag)->len;
2378 		skb->data_len += (*frag)->len;
2379 
2380 		frag = &(*frag)->next;
2381 	}
2382 
2383 	return sent;
2384 }
2385 
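/* Build a connectionless PDU: basic L2CAP header, the 2-byte PSM, then the
 * payload copied from the message iterator.
 */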
2386 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2387 						 struct msghdr *msg, size_t len)
2388 {
2389 	struct l2cap_conn *conn = chan->conn;
2390 	struct sk_buff *skb;
2391 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2392 	struct l2cap_hdr *lh;
2393 
2394 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2395 	       __le16_to_cpu(chan->psm), len);
2396 
2397 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2398 
2399 	skb = chan->ops->alloc_skb(chan, hlen, count,
2400 				   msg->msg_flags & MSG_DONTWAIT);
2401 	if (IS_ERR(skb))
2402 		return skb;
2403 
2404 	/* Create L2CAP header */
2405 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2406 	lh->cid = cpu_to_le16(chan->dcid);
2407 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2408 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2409 
2410 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2411 	if (unlikely(err < 0)) {
2412 		kfree_skb(skb);
2413 		return ERR_PTR(err);
2414 	}
2415 	return skb;
2416 }
2417 
2418 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2419 					      struct msghdr *msg, size_t len)
2420 {
2421 	struct l2cap_conn *conn = chan->conn;
2422 	struct sk_buff *skb;
2423 	int err, count;
2424 	struct l2cap_hdr *lh;
2425 
2426 	BT_DBG("chan %p len %zu", chan, len);
2427 
2428 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2429 
2430 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2431 				   msg->msg_flags & MSG_DONTWAIT);
2432 	if (IS_ERR(skb))
2433 		return skb;
2434 
2435 	/* Create L2CAP header */
2436 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2437 	lh->cid = cpu_to_le16(chan->dcid);
2438 	lh->len = cpu_to_le16(len);
2439 
2440 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2441 	if (unlikely(err < 0)) {
2442 		kfree_skb(skb);
2443 		return ERR_PTR(err);
2444 	}
2445 	return skb;
2446 }
2447 
2448 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2449 					       struct msghdr *msg, size_t len,
2450 					       u16 sdulen)
2451 {
2452 	struct l2cap_conn *conn = chan->conn;
2453 	struct sk_buff *skb;
2454 	int err, count, hlen;
2455 	struct l2cap_hdr *lh;
2456 
2457 	BT_DBG("chan %p len %zu", chan, len);
2458 
2459 	if (!conn)
2460 		return ERR_PTR(-ENOTCONN);
2461 
2462 	hlen = __ertm_hdr_size(chan);
2463 
2464 	if (sdulen)
2465 		hlen += L2CAP_SDULEN_SIZE;
2466 
2467 	if (chan->fcs == L2CAP_FCS_CRC16)
2468 		hlen += L2CAP_FCS_SIZE;
2469 
2470 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2471 
2472 	skb = chan->ops->alloc_skb(chan, hlen, count,
2473 				   msg->msg_flags & MSG_DONTWAIT);
2474 	if (IS_ERR(skb))
2475 		return skb;
2476 
2477 	/* Create L2CAP header */
2478 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2479 	lh->cid = cpu_to_le16(chan->dcid);
2480 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2481 
2482 	/* Control header is populated later */
2483 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2484 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2485 	else
2486 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2487 
2488 	if (sdulen)
2489 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2490 
2491 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2492 	if (unlikely(err < 0)) {
2493 		kfree_skb(skb);
2494 		return ERR_PTR(err);
2495 	}
2496 
2497 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2498 	bt_cb(skb)->l2cap.retries = 0;
2499 	return skb;
2500 }
2501 
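/* Segment an outgoing SDU into ERTM/Streaming I-frame PDUs.  The PDU size is
 * bounded by the HCI MTU, the remote MPS and, for BR/EDR connections,
 * L2CAP_BREDR_MAX_PAYLOAD.  The first PDU of a segmented SDU is tagged SAR
 * "start" and carries the total SDU length, intermediate PDUs are tagged
 * "continue" and the last one "end"; an SDU that fits in a single PDU is
 * sent "unsegmented".
 */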
2502 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2503 			     struct sk_buff_head *seg_queue,
2504 			     struct msghdr *msg, size_t len)
2505 {
2506 	struct sk_buff *skb;
2507 	u16 sdu_len;
2508 	size_t pdu_len;
2509 	u8 sar;
2510 
2511 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2512 
2513 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2514 	 * so fragmented skbs are not used.  The HCI layer's handling
2515 	 * of fragmented skbs is not compatible with ERTM's queueing.
2516 	 */
2517 
2518 	/* PDU size is derived from the HCI MTU */
2519 	pdu_len = chan->conn->mtu;
2520 
2521 	/* Constrain PDU size for BR/EDR connections */
2522 	if (!chan->hs_hcon)
2523 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2524 
2525 	/* Adjust for largest possible L2CAP overhead. */
2526 	if (chan->fcs)
2527 		pdu_len -= L2CAP_FCS_SIZE;
2528 
2529 	pdu_len -= __ertm_hdr_size(chan);
2530 
2531 	/* Remote device may have requested smaller PDUs */
2532 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2533 
2534 	if (len <= pdu_len) {
2535 		sar = L2CAP_SAR_UNSEGMENTED;
2536 		sdu_len = 0;
2537 		pdu_len = len;
2538 	} else {
2539 		sar = L2CAP_SAR_START;
2540 		sdu_len = len;
2541 	}
2542 
2543 	while (len > 0) {
2544 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2545 
2546 		if (IS_ERR(skb)) {
2547 			__skb_queue_purge(seg_queue);
2548 			return PTR_ERR(skb);
2549 		}
2550 
2551 		bt_cb(skb)->l2cap.sar = sar;
2552 		__skb_queue_tail(seg_queue, skb);
2553 
2554 		len -= pdu_len;
2555 		if (sdu_len)
2556 			sdu_len = 0;
2557 
2558 		if (len <= pdu_len) {
2559 			sar = L2CAP_SAR_END;
2560 			pdu_len = len;
2561 		} else {
2562 			sar = L2CAP_SAR_CONTINUE;
2563 		}
2564 	}
2565 
2566 	return 0;
2567 }
2568 
2569 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2570 						   struct msghdr *msg,
2571 						   size_t len, u16 sdulen)
2572 {
2573 	struct l2cap_conn *conn = chan->conn;
2574 	struct sk_buff *skb;
2575 	int err, count, hlen;
2576 	struct l2cap_hdr *lh;
2577 
2578 	BT_DBG("chan %p len %zu", chan, len);
2579 
2580 	if (!conn)
2581 		return ERR_PTR(-ENOTCONN);
2582 
2583 	hlen = L2CAP_HDR_SIZE;
2584 
2585 	if (sdulen)
2586 		hlen += L2CAP_SDULEN_SIZE;
2587 
2588 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2589 
2590 	skb = chan->ops->alloc_skb(chan, hlen, count,
2591 				   msg->msg_flags & MSG_DONTWAIT);
2592 	if (IS_ERR(skb))
2593 		return skb;
2594 
2595 	/* Create L2CAP header */
2596 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2597 	lh->cid = cpu_to_le16(chan->dcid);
2598 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2599 
2600 	if (sdulen)
2601 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2602 
2603 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2604 	if (unlikely(err < 0)) {
2605 		kfree_skb(skb);
2606 		return ERR_PTR(err);
2607 	}
2608 
2609 	return skb;
2610 }
2611 
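/* Segment an SDU for LE credit based flow control.  Only the first PDU
 * carries the 2-byte SDU length field, so subsequent PDUs may carry up to
 * L2CAP_SDULEN_SIZE more payload bytes than the first one.
 */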
2612 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2613 				struct sk_buff_head *seg_queue,
2614 				struct msghdr *msg, size_t len)
2615 {
2616 	struct sk_buff *skb;
2617 	size_t pdu_len;
2618 	u16 sdu_len;
2619 
2620 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2621 
2622 	sdu_len = len;
2623 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2624 
2625 	while (len > 0) {
2626 		if (len <= pdu_len)
2627 			pdu_len = len;
2628 
2629 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2630 		if (IS_ERR(skb)) {
2631 			__skb_queue_purge(seg_queue);
2632 			return PTR_ERR(skb);
2633 		}
2634 
2635 		__skb_queue_tail(seg_queue, skb);
2636 
2637 		len -= pdu_len;
2638 
2639 		if (sdu_len) {
2640 			sdu_len = 0;
2641 			pdu_len += L2CAP_SDULEN_SIZE;
2642 		}
2643 	}
2644 
2645 	return 0;
2646 }
2647 
2648 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2649 {
2650 	int sent = 0;
2651 
2652 	BT_DBG("chan %p", chan);
2653 
2654 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2655 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2656 		chan->tx_credits--;
2657 		sent++;
2658 	}
2659 
2660 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2661 	       skb_queue_len(&chan->tx_q));
2662 }
2663 
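/* Queue outgoing data on a channel.  Connectionless channels send a single
 * PDU directly; otherwise the SDU is segmented and handed to the transmit
 * path matching the channel mode (credit based flow control, basic, ERTM or
 * streaming).  Returns the number of bytes accepted or a negative error.
 */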
2664 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2665 {
2666 	struct sk_buff *skb;
2667 	int err;
2668 	struct sk_buff_head seg_queue;
2669 
2670 	if (!chan->conn)
2671 		return -ENOTCONN;
2672 
2673 	/* Connectionless channel */
2674 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2675 		skb = l2cap_create_connless_pdu(chan, msg, len);
2676 		if (IS_ERR(skb))
2677 			return PTR_ERR(skb);
2678 
2679 		/* Channel lock is released before requesting new skb and then
2680 		 * reacquired thus we need to recheck channel state.
2681 		 */
2682 		if (chan->state != BT_CONNECTED) {
2683 			kfree_skb(skb);
2684 			return -ENOTCONN;
2685 		}
2686 
2687 		l2cap_do_send(chan, skb);
2688 		return len;
2689 	}
2690 
2691 	switch (chan->mode) {
2692 	case L2CAP_MODE_LE_FLOWCTL:
2693 	case L2CAP_MODE_EXT_FLOWCTL:
2694 		/* Check outgoing MTU */
2695 		if (len > chan->omtu)
2696 			return -EMSGSIZE;
2697 
2698 		__skb_queue_head_init(&seg_queue);
2699 
2700 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2701 
2702 		if (chan->state != BT_CONNECTED) {
2703 			__skb_queue_purge(&seg_queue);
2704 			err = -ENOTCONN;
2705 		}
2706 
2707 		if (err)
2708 			return err;
2709 
2710 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2711 
2712 		l2cap_le_flowctl_send(chan);
2713 
2714 		if (!chan->tx_credits)
2715 			chan->ops->suspend(chan);
2716 
2717 		err = len;
2718 
2719 		break;
2720 
2721 	case L2CAP_MODE_BASIC:
2722 		/* Check outgoing MTU */
2723 		if (len > chan->omtu)
2724 			return -EMSGSIZE;
2725 
2726 		/* Create a basic PDU */
2727 		skb = l2cap_create_basic_pdu(chan, msg, len);
2728 		if (IS_ERR(skb))
2729 			return PTR_ERR(skb);
2730 
2731 		/* Channel lock is released before requesting new skb and then
2732 		 * reacquired thus we need to recheck channel state.
2733 		 */
2734 		if (chan->state != BT_CONNECTED) {
2735 			kfree_skb(skb);
2736 			return -ENOTCONN;
2737 		}
2738 
2739 		l2cap_do_send(chan, skb);
2740 		err = len;
2741 		break;
2742 
2743 	case L2CAP_MODE_ERTM:
2744 	case L2CAP_MODE_STREAMING:
2745 		/* Check outgoing MTU */
2746 		if (len > chan->omtu) {
2747 			err = -EMSGSIZE;
2748 			break;
2749 		}
2750 
2751 		__skb_queue_head_init(&seg_queue);
2752 
2753 		/* Do segmentation before calling in to the state machine,
2754 		 * since it's possible to block while waiting for memory
2755 		 * allocation.
2756 		 */
2757 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2758 
2759 		/* The channel could have been closed while segmenting,
2760 		 * check that it is still connected.
2761 		 */
2762 		if (chan->state != BT_CONNECTED) {
2763 			__skb_queue_purge(&seg_queue);
2764 			err = -ENOTCONN;
2765 		}
2766 
2767 		if (err)
2768 			break;
2769 
2770 		if (chan->mode == L2CAP_MODE_ERTM)
2771 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2772 		else
2773 			l2cap_streaming_send(chan, &seg_queue);
2774 
2775 		err = len;
2776 
2777 		/* If the skbs were not queued for sending, they'll still be in
2778 		 * seg_queue and need to be purged.
2779 		 */
2780 		__skb_queue_purge(&seg_queue);
2781 		break;
2782 
2783 	default:
2784 		BT_DBG("bad mode %1.1x", chan->mode);
2785 		err = -EBADFD;
2786 	}
2787 
2788 	return err;
2789 }
2790 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2791 
2792 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2793 {
2794 	struct l2cap_ctrl control;
2795 	u16 seq;
2796 
2797 	BT_DBG("chan %p, txseq %u", chan, txseq);
2798 
2799 	memset(&control, 0, sizeof(control));
2800 	control.sframe = 1;
2801 	control.super = L2CAP_SUPER_SREJ;
2802 
2803 	for (seq = chan->expected_tx_seq; seq != txseq;
2804 	     seq = __next_seq(chan, seq)) {
2805 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2806 			control.reqseq = seq;
2807 			l2cap_send_sframe(chan, &control);
2808 			l2cap_seq_list_append(&chan->srej_list, seq);
2809 		}
2810 	}
2811 
2812 	chan->expected_tx_seq = __next_seq(chan, txseq);
2813 }
2814 
2815 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2816 {
2817 	struct l2cap_ctrl control;
2818 
2819 	BT_DBG("chan %p", chan);
2820 
2821 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2822 		return;
2823 
2824 	memset(&control, 0, sizeof(control));
2825 	control.sframe = 1;
2826 	control.super = L2CAP_SUPER_SREJ;
2827 	control.reqseq = chan->srej_list.tail;
2828 	l2cap_send_sframe(chan, &control);
2829 }
2830 
2831 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2832 {
2833 	struct l2cap_ctrl control;
2834 	u16 initial_head;
2835 	u16 seq;
2836 
2837 	BT_DBG("chan %p, txseq %u", chan, txseq);
2838 
2839 	memset(&control, 0, sizeof(control));
2840 	control.sframe = 1;
2841 	control.super = L2CAP_SUPER_SREJ;
2842 
2843 	/* Capture initial list head to allow only one pass through the list. */
2844 	initial_head = chan->srej_list.head;
2845 
2846 	do {
2847 		seq = l2cap_seq_list_pop(&chan->srej_list);
2848 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2849 			break;
2850 
2851 		control.reqseq = seq;
2852 		l2cap_send_sframe(chan, &control);
2853 		l2cap_seq_list_append(&chan->srej_list, seq);
2854 	} while (chan->srej_list.head != initial_head);
2855 }
2856 
2857 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2858 {
2859 	struct sk_buff *acked_skb;
2860 	u16 ackseq;
2861 
2862 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2863 
2864 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2865 		return;
2866 
2867 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2868 	       chan->expected_ack_seq, chan->unacked_frames);
2869 
2870 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2871 	     ackseq = __next_seq(chan, ackseq)) {
2872 
2873 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2874 		if (acked_skb) {
2875 			skb_unlink(acked_skb, &chan->tx_q);
2876 			kfree_skb(acked_skb);
2877 			chan->unacked_frames--;
2878 		}
2879 	}
2880 
2881 	chan->expected_ack_seq = reqseq;
2882 
2883 	if (chan->unacked_frames == 0)
2884 		__clear_retrans_timer(chan);
2885 
2886 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2887 }
2888 
2889 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2890 {
2891 	BT_DBG("chan %p", chan);
2892 
2893 	chan->expected_tx_seq = chan->buffer_seq;
2894 	l2cap_seq_list_clear(&chan->srej_list);
2895 	skb_queue_purge(&chan->srej_q);
2896 	chan->rx_state = L2CAP_RX_STATE_RECV;
2897 }
2898 
2899 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2900 				struct l2cap_ctrl *control,
2901 				struct sk_buff_head *skbs, u8 event)
2902 {
2903 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2904 	       event);
2905 
2906 	switch (event) {
2907 	case L2CAP_EV_DATA_REQUEST:
2908 		if (chan->tx_send_head == NULL)
2909 			chan->tx_send_head = skb_peek(skbs);
2910 
2911 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2912 		l2cap_ertm_send(chan);
2913 		break;
2914 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2915 		BT_DBG("Enter LOCAL_BUSY");
2916 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2917 
2918 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2919 			/* The SREJ_SENT state must be aborted if we are to
2920 			 * enter the LOCAL_BUSY state.
2921 			 */
2922 			l2cap_abort_rx_srej_sent(chan);
2923 		}
2924 
2925 		l2cap_send_ack(chan);
2926 
2927 		break;
2928 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2929 		BT_DBG("Exit LOCAL_BUSY");
2930 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2931 
2932 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2933 			struct l2cap_ctrl local_control;
2934 
2935 			memset(&local_control, 0, sizeof(local_control));
2936 			local_control.sframe = 1;
2937 			local_control.super = L2CAP_SUPER_RR;
2938 			local_control.poll = 1;
2939 			local_control.reqseq = chan->buffer_seq;
2940 			l2cap_send_sframe(chan, &local_control);
2941 
2942 			chan->retry_count = 1;
2943 			__set_monitor_timer(chan);
2944 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2945 		}
2946 		break;
2947 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2948 		l2cap_process_reqseq(chan, control->reqseq);
2949 		break;
2950 	case L2CAP_EV_EXPLICIT_POLL:
2951 		l2cap_send_rr_or_rnr(chan, 1);
2952 		chan->retry_count = 1;
2953 		__set_monitor_timer(chan);
2954 		__clear_ack_timer(chan);
2955 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2956 		break;
2957 	case L2CAP_EV_RETRANS_TO:
2958 		l2cap_send_rr_or_rnr(chan, 1);
2959 		chan->retry_count = 1;
2960 		__set_monitor_timer(chan);
2961 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2962 		break;
2963 	case L2CAP_EV_RECV_FBIT:
2964 		/* Nothing to process */
2965 		break;
2966 	default:
2967 		break;
2968 	}
2969 }
2970 
2971 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2972 				  struct l2cap_ctrl *control,
2973 				  struct sk_buff_head *skbs, u8 event)
2974 {
2975 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2976 	       event);
2977 
2978 	switch (event) {
2979 	case L2CAP_EV_DATA_REQUEST:
2980 		if (chan->tx_send_head == NULL)
2981 			chan->tx_send_head = skb_peek(skbs);
2982 		/* Queue data, but don't send. */
2983 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2984 		break;
2985 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2986 		BT_DBG("Enter LOCAL_BUSY");
2987 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2988 
2989 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2990 			/* The SREJ_SENT state must be aborted if we are to
2991 			 * enter the LOCAL_BUSY state.
2992 			 */
2993 			l2cap_abort_rx_srej_sent(chan);
2994 		}
2995 
2996 		l2cap_send_ack(chan);
2997 
2998 		break;
2999 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
3000 		BT_DBG("Exit LOCAL_BUSY");
3001 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3002 
3003 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
3004 			struct l2cap_ctrl local_control;
3005 			memset(&local_control, 0, sizeof(local_control));
3006 			local_control.sframe = 1;
3007 			local_control.super = L2CAP_SUPER_RR;
3008 			local_control.poll = 1;
3009 			local_control.reqseq = chan->buffer_seq;
3010 			l2cap_send_sframe(chan, &local_control);
3011 
3012 			chan->retry_count = 1;
3013 			__set_monitor_timer(chan);
3014 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
3015 		}
3016 		break;
3017 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
3018 		l2cap_process_reqseq(chan, control->reqseq);
3019 		fallthrough;
3020 
3021 	case L2CAP_EV_RECV_FBIT:
3022 		if (control && control->final) {
3023 			__clear_monitor_timer(chan);
3024 			if (chan->unacked_frames > 0)
3025 				__set_retrans_timer(chan);
3026 			chan->retry_count = 0;
3027 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3028 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
3029 		}
3030 		break;
3031 	case L2CAP_EV_EXPLICIT_POLL:
3032 		/* Ignore */
3033 		break;
3034 	case L2CAP_EV_MONITOR_TO:
3035 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3036 			l2cap_send_rr_or_rnr(chan, 1);
3037 			__set_monitor_timer(chan);
3038 			chan->retry_count++;
3039 		} else {
3040 			l2cap_send_disconn_req(chan, ECONNABORTED);
3041 		}
3042 		break;
3043 	default:
3044 		break;
3045 	}
3046 }
3047 
3048 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3049 		     struct sk_buff_head *skbs, u8 event)
3050 {
3051 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3052 	       chan, control, skbs, event, chan->tx_state);
3053 
3054 	switch (chan->tx_state) {
3055 	case L2CAP_TX_STATE_XMIT:
3056 		l2cap_tx_state_xmit(chan, control, skbs, event);
3057 		break;
3058 	case L2CAP_TX_STATE_WAIT_F:
3059 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3060 		break;
3061 	default:
3062 		/* Ignore event */
3063 		break;
3064 	}
3065 }
3066 
3067 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3068 			     struct l2cap_ctrl *control)
3069 {
3070 	BT_DBG("chan %p, control %p", chan, control);
3071 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3072 }
3073 
3074 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3075 				  struct l2cap_ctrl *control)
3076 {
3077 	BT_DBG("chan %p, control %p", chan, control);
3078 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3079 }
3080 
3081 /* Copy frame to all raw sockets on that connection */
3082 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3083 {
3084 	struct sk_buff *nskb;
3085 	struct l2cap_chan *chan;
3086 
3087 	BT_DBG("conn %p", conn);
3088 
3089 	mutex_lock(&conn->chan_lock);
3090 
3091 	list_for_each_entry(chan, &conn->chan_l, list) {
3092 		if (chan->chan_type != L2CAP_CHAN_RAW)
3093 			continue;
3094 
3095 		/* Don't send frame to the channel it came from */
3096 		if (bt_cb(skb)->l2cap.chan == chan)
3097 			continue;
3098 
3099 		nskb = skb_clone(skb, GFP_KERNEL);
3100 		if (!nskb)
3101 			continue;
3102 		if (chan->ops->recv(chan, nskb))
3103 			kfree_skb(nskb);
3104 	}
3105 
3106 	mutex_unlock(&conn->chan_lock);
3107 }
3108 
3109 /* ---- L2CAP signalling commands ---- */
3110 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3111 				       u8 ident, u16 dlen, void *data)
3112 {
3113 	struct sk_buff *skb, **frag;
3114 	struct l2cap_cmd_hdr *cmd;
3115 	struct l2cap_hdr *lh;
3116 	int len, count;
3117 
3118 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3119 	       conn, code, ident, dlen);
3120 
3121 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3122 		return NULL;
3123 
3124 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3125 	count = min_t(unsigned int, conn->mtu, len);
3126 
3127 	skb = bt_skb_alloc(count, GFP_KERNEL);
3128 	if (!skb)
3129 		return NULL;
3130 
3131 	lh = skb_put(skb, L2CAP_HDR_SIZE);
3132 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3133 
3134 	if (conn->hcon->type == LE_LINK)
3135 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3136 	else
3137 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3138 
3139 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3140 	cmd->code  = code;
3141 	cmd->ident = ident;
3142 	cmd->len   = cpu_to_le16(dlen);
3143 
3144 	if (dlen) {
3145 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3146 		skb_put_data(skb, data, count);
3147 		data += count;
3148 	}
3149 
3150 	len -= skb->len;
3151 
3152 	/* Continuation fragments (no L2CAP header) */
3153 	frag = &skb_shinfo(skb)->frag_list;
3154 	while (len) {
3155 		count = min_t(unsigned int, conn->mtu, len);
3156 
3157 		*frag = bt_skb_alloc(count, GFP_KERNEL);
3158 		if (!*frag)
3159 			goto fail;
3160 
3161 		skb_put_data(*frag, data, count);
3162 
3163 		len  -= count;
3164 		data += count;
3165 
3166 		frag = &(*frag)->next;
3167 	}
3168 
3169 	return skb;
3170 
3171 fail:
3172 	kfree_skb(skb);
3173 	return NULL;
3174 }
3175 
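/* Configuration options are type(1)/length(1)/value(length) triplets.  This
 * parser returns 1, 2 and 4 byte values by value and anything longer as a
 * pointer into the request buffer; the return value is the total number of
 * bytes consumed.
 */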
3176 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3177 				     unsigned long *val)
3178 {
3179 	struct l2cap_conf_opt *opt = *ptr;
3180 	int len;
3181 
3182 	len = L2CAP_CONF_OPT_SIZE + opt->len;
3183 	*ptr += len;
3184 
3185 	*type = opt->type;
3186 	*olen = opt->len;
3187 
3188 	switch (opt->len) {
3189 	case 1:
3190 		*val = *((u8 *) opt->val);
3191 		break;
3192 
3193 	case 2:
3194 		*val = get_unaligned_le16(opt->val);
3195 		break;
3196 
3197 	case 4:
3198 		*val = get_unaligned_le32(opt->val);
3199 		break;
3200 
3201 	default:
3202 		*val = (unsigned long) opt->val;
3203 		break;
3204 	}
3205 
3206 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3207 	return len;
3208 }
3209 
3210 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3211 {
3212 	struct l2cap_conf_opt *opt = *ptr;
3213 
3214 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3215 
3216 	if (size < L2CAP_CONF_OPT_SIZE + len)
3217 		return;
3218 
3219 	opt->type = type;
3220 	opt->len  = len;
3221 
3222 	switch (len) {
3223 	case 1:
3224 		*((u8 *) opt->val)  = val;
3225 		break;
3226 
3227 	case 2:
3228 		put_unaligned_le16(val, opt->val);
3229 		break;
3230 
3231 	case 4:
3232 		put_unaligned_le32(val, opt->val);
3233 		break;
3234 
3235 	default:
3236 		memcpy(opt->val, (void *) val, len);
3237 		break;
3238 	}
3239 
3240 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3241 }
3242 
3243 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3244 {
3245 	struct l2cap_conf_efs efs;
3246 
3247 	switch (chan->mode) {
3248 	case L2CAP_MODE_ERTM:
3249 		efs.id		= chan->local_id;
3250 		efs.stype	= chan->local_stype;
3251 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3252 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3253 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3254 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3255 		break;
3256 
3257 	case L2CAP_MODE_STREAMING:
3258 		efs.id		= 1;
3259 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3260 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3261 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3262 		efs.acc_lat	= 0;
3263 		efs.flush_to	= 0;
3264 		break;
3265 
3266 	default:
3267 		return;
3268 	}
3269 
3270 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3271 			   (unsigned long) &efs, size);
3272 }
3273 
3274 static void l2cap_ack_timeout(struct work_struct *work)
3275 {
3276 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3277 					       ack_timer.work);
3278 	u16 frames_to_ack;
3279 
3280 	BT_DBG("chan %p", chan);
3281 
3282 	l2cap_chan_lock(chan);
3283 
3284 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3285 				     chan->last_acked_seq);
3286 
3287 	if (frames_to_ack)
3288 		l2cap_send_rr_or_rnr(chan, 0);
3289 
3290 	l2cap_chan_unlock(chan);
3291 	l2cap_chan_put(chan);
3292 }
3293 
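/* Reset the channel's sequence-number state for a fresh connection and, for
 * ERTM, initialize the retransmission, monitor and ack timers together with
 * the SREJ and retransmission sequence lists.
 */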
3294 int l2cap_ertm_init(struct l2cap_chan *chan)
3295 {
3296 	int err;
3297 
3298 	chan->next_tx_seq = 0;
3299 	chan->expected_tx_seq = 0;
3300 	chan->expected_ack_seq = 0;
3301 	chan->unacked_frames = 0;
3302 	chan->buffer_seq = 0;
3303 	chan->frames_sent = 0;
3304 	chan->last_acked_seq = 0;
3305 	chan->sdu = NULL;
3306 	chan->sdu_last_frag = NULL;
3307 	chan->sdu_len = 0;
3308 
3309 	skb_queue_head_init(&chan->tx_q);
3310 
3311 	chan->local_amp_id = AMP_ID_BREDR;
3312 	chan->move_id = AMP_ID_BREDR;
3313 	chan->move_state = L2CAP_MOVE_STABLE;
3314 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3315 
3316 	if (chan->mode != L2CAP_MODE_ERTM)
3317 		return 0;
3318 
3319 	chan->rx_state = L2CAP_RX_STATE_RECV;
3320 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3321 
3322 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3323 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3324 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3325 
3326 	skb_queue_head_init(&chan->srej_q);
3327 
3328 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3329 	if (err < 0)
3330 		return err;
3331 
3332 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3333 	if (err < 0)
3334 		l2cap_seq_list_free(&chan->srej_list);
3335 
3336 	return err;
3337 }
3338 
3339 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3340 {
3341 	switch (mode) {
3342 	case L2CAP_MODE_STREAMING:
3343 	case L2CAP_MODE_ERTM:
3344 		if (l2cap_mode_supported(mode, remote_feat_mask))
3345 			return mode;
3346 		fallthrough;
3347 	default:
3348 		return L2CAP_MODE_BASIC;
3349 	}
3350 }
3351 
3352 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3353 {
3354 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3355 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3356 }
3357 
3358 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3359 {
3360 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3361 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3362 }
3363 
3364 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3365 				      struct l2cap_conf_rfc *rfc)
3366 {
3367 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3368 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3369 
3370 		 * Class 1 devices must have ERTM timeouts
3371 		 * exceeding the Link Supervision Timeout.  The
3372 		 * default Link Supervision Timeout for AMP
3373 		 * controllers is 10 seconds.
3374 		 *
3375 		 * Class 1 devices use 0xffffffff for their
3376 		 * best-effort flush timeout, so the clamping logic
3377 		 * will result in a timeout that meets the above
3378 		 * requirement.  ERTM timeouts are 16-bit values, so
3379 		 * the maximum timeout is 65.535 seconds.
3380 		 */
3381 
3382 		/* Convert timeout to milliseconds and round */
3383 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3384 
3385 		/* This is the recommended formula for class 2 devices
3386 		 * that start ERTM timers when packets are sent to the
3387 		 * controller.
3388 		 */
3389 		ertm_to = 3 * ertm_to + 500;
3390 
3391 		if (ertm_to > 0xffff)
3392 			ertm_to = 0xffff;
3393 
3394 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3395 		rfc->monitor_timeout = rfc->retrans_timeout;
3396 	} else {
3397 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3398 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3399 	}
3400 }
3401 
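/* Choose the transmit window: switch to the extended control field when the
 * requested window exceeds the default and the peer supports extended window
 * size, otherwise clamp tx_win to L2CAP_DEFAULT_TX_WINDOW.
 */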
3402 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3403 {
3404 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3405 	    __l2cap_ews_supported(chan->conn)) {
3406 		/* use extended control field */
3407 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3408 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3409 	} else {
3410 		chan->tx_win = min_t(u16, chan->tx_win,
3411 				     L2CAP_DEFAULT_TX_WINDOW);
3412 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3413 	}
3414 	chan->ack_win = chan->tx_win;
3415 }
3416 
3417 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3418 {
3419 	struct hci_conn *conn = chan->conn->hcon;
3420 
3421 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3422 
3423 	/* The 2-DH1 packet has between 2 and 56 information bytes
3424 	 * (including the 2-byte payload header)
3425 	 */
3426 	if (!(conn->pkt_type & HCI_2DH1))
3427 		chan->imtu = 54;
3428 
3429 	/* The 3-DH1 packet has between 2 and 85 information bytes
3430 	 * (including the 2-byte payload header)
3431 	 */
3432 	if (!(conn->pkt_type & HCI_3DH1))
3433 		chan->imtu = 83;
3434 
3435 	/* The 2-DH3 packet has between 2 and 369 information bytes
3436 	 * (including the 2-byte payload header)
3437 	 */
3438 	if (!(conn->pkt_type & HCI_2DH3))
3439 		chan->imtu = 367;
3440 
3441 	/* The 3-DH3 packet has between 2 and 554 information bytes
3442 	 * (including the 2-byte payload header)
3443 	 */
3444 	if (!(conn->pkt_type & HCI_3DH3))
3445 		chan->imtu = 552;
3446 
3447 	/* The 2-DH5 packet has between 2 and 681 information bytes
3448 	 * (including the 2-byte payload header)
3449 	 */
3450 	if (!(conn->pkt_type & HCI_2DH5))
3451 		chan->imtu = 679;
3452 
3453 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3454 	 * (including the 2-byte payload header)
3455 	 */
3456 	if (!(conn->pkt_type & HCI_3DH5))
3457 		chan->imtu = 1021;
3458 }
3459 
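/* Build a Configuration Request for the channel: pick the channel mode from
 * the local setting and the remote feature mask, then append the MTU, RFC,
 * FCS, EFS and extended window size options as applicable.
 */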
3460 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3461 {
3462 	struct l2cap_conf_req *req = data;
3463 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3464 	void *ptr = req->data;
3465 	void *endptr = data + data_size;
3466 	u16 size;
3467 
3468 	BT_DBG("chan %p", chan);
3469 
3470 	if (chan->num_conf_req || chan->num_conf_rsp)
3471 		goto done;
3472 
3473 	switch (chan->mode) {
3474 	case L2CAP_MODE_STREAMING:
3475 	case L2CAP_MODE_ERTM:
3476 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3477 			break;
3478 
3479 		if (__l2cap_efs_supported(chan->conn))
3480 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3481 
3482 		fallthrough;
3483 	default:
3484 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3485 		break;
3486 	}
3487 
3488 done:
3489 	if (chan->imtu != L2CAP_DEFAULT_MTU) {
3490 		if (!chan->imtu)
3491 			l2cap_mtu_auto(chan);
3492 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3493 				   endptr - ptr);
3494 	}
3495 
3496 	switch (chan->mode) {
3497 	case L2CAP_MODE_BASIC:
3498 		if (disable_ertm)
3499 			break;
3500 
3501 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3502 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3503 			break;
3504 
3505 		rfc.mode            = L2CAP_MODE_BASIC;
3506 		rfc.txwin_size      = 0;
3507 		rfc.max_transmit    = 0;
3508 		rfc.retrans_timeout = 0;
3509 		rfc.monitor_timeout = 0;
3510 		rfc.max_pdu_size    = 0;
3511 
3512 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3513 				   (unsigned long) &rfc, endptr - ptr);
3514 		break;
3515 
3516 	case L2CAP_MODE_ERTM:
3517 		rfc.mode            = L2CAP_MODE_ERTM;
3518 		rfc.max_transmit    = chan->max_tx;
3519 
3520 		__l2cap_set_ertm_timeouts(chan, &rfc);
3521 
3522 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3523 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3524 			     L2CAP_FCS_SIZE);
3525 		rfc.max_pdu_size = cpu_to_le16(size);
3526 
3527 		l2cap_txwin_setup(chan);
3528 
3529 		rfc.txwin_size = min_t(u16, chan->tx_win,
3530 				       L2CAP_DEFAULT_TX_WINDOW);
3531 
3532 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3533 				   (unsigned long) &rfc, endptr - ptr);
3534 
3535 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3536 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3537 
3538 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3539 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3540 					   chan->tx_win, endptr - ptr);
3541 
3542 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3543 			if (chan->fcs == L2CAP_FCS_NONE ||
3544 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3545 				chan->fcs = L2CAP_FCS_NONE;
3546 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3547 						   chan->fcs, endptr - ptr);
3548 			}
3549 		break;
3550 
3551 	case L2CAP_MODE_STREAMING:
3552 		l2cap_txwin_setup(chan);
3553 		rfc.mode            = L2CAP_MODE_STREAMING;
3554 		rfc.txwin_size      = 0;
3555 		rfc.max_transmit    = 0;
3556 		rfc.retrans_timeout = 0;
3557 		rfc.monitor_timeout = 0;
3558 
3559 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3560 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3561 			     L2CAP_FCS_SIZE);
3562 		rfc.max_pdu_size = cpu_to_le16(size);
3563 
3564 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3565 				   (unsigned long) &rfc, endptr - ptr);
3566 
3567 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3568 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3569 
3570 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3571 			if (chan->fcs == L2CAP_FCS_NONE ||
3572 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3573 				chan->fcs = L2CAP_FCS_NONE;
3574 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3575 						   chan->fcs, endptr - ptr);
3576 			}
3577 		break;
3578 	}
3579 
3580 	req->dcid  = cpu_to_le16(chan->dcid);
3581 	req->flags = cpu_to_le16(0);
3582 
3583 	return ptr - data;
3584 }
3585 
3586 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3587 {
3588 	struct l2cap_conf_rsp *rsp = data;
3589 	void *ptr = rsp->data;
3590 	void *endptr = data + data_size;
3591 	void *req = chan->conf_req;
3592 	int len = chan->conf_len;
3593 	int type, hint, olen;
3594 	unsigned long val;
3595 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3596 	struct l2cap_conf_efs efs;
3597 	u8 remote_efs = 0;
3598 	u16 mtu = L2CAP_DEFAULT_MTU;
3599 	u16 result = L2CAP_CONF_SUCCESS;
3600 	u16 size;
3601 
3602 	BT_DBG("chan %p", chan);
3603 
3604 	while (len >= L2CAP_CONF_OPT_SIZE) {
3605 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3606 		if (len < 0)
3607 			break;
3608 
3609 		hint  = type & L2CAP_CONF_HINT;
3610 		type &= L2CAP_CONF_MASK;
3611 
3612 		switch (type) {
3613 		case L2CAP_CONF_MTU:
3614 			if (olen != 2)
3615 				break;
3616 			mtu = val;
3617 			break;
3618 
3619 		case L2CAP_CONF_FLUSH_TO:
3620 			if (olen != 2)
3621 				break;
3622 			chan->flush_to = val;
3623 			break;
3624 
3625 		case L2CAP_CONF_QOS:
3626 			break;
3627 
3628 		case L2CAP_CONF_RFC:
3629 			if (olen != sizeof(rfc))
3630 				break;
3631 			memcpy(&rfc, (void *) val, olen);
3632 			break;
3633 
3634 		case L2CAP_CONF_FCS:
3635 			if (olen != 1)
3636 				break;
3637 			if (val == L2CAP_FCS_NONE)
3638 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3639 			break;
3640 
3641 		case L2CAP_CONF_EFS:
3642 			if (olen != sizeof(efs))
3643 				break;
3644 			remote_efs = 1;
3645 			memcpy(&efs, (void *) val, olen);
3646 			break;
3647 
3648 		case L2CAP_CONF_EWS:
3649 			if (olen != 2)
3650 				break;
3651 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3652 				return -ECONNREFUSED;
3653 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3654 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3655 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3656 			chan->remote_tx_win = val;
3657 			break;
3658 
3659 		default:
3660 			if (hint)
3661 				break;
3662 			result = L2CAP_CONF_UNKNOWN;
3663 			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
3664 			break;
3665 		}
3666 	}
3667 
3668 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3669 		goto done;
3670 
3671 	switch (chan->mode) {
3672 	case L2CAP_MODE_STREAMING:
3673 	case L2CAP_MODE_ERTM:
3674 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3675 			chan->mode = l2cap_select_mode(rfc.mode,
3676 						       chan->conn->feat_mask);
3677 			break;
3678 		}
3679 
3680 		if (remote_efs) {
3681 			if (__l2cap_efs_supported(chan->conn))
3682 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3683 			else
3684 				return -ECONNREFUSED;
3685 		}
3686 
3687 		if (chan->mode != rfc.mode)
3688 			return -ECONNREFUSED;
3689 
3690 		break;
3691 	}
3692 
3693 done:
3694 	if (chan->mode != rfc.mode) {
3695 		result = L2CAP_CONF_UNACCEPT;
3696 		rfc.mode = chan->mode;
3697 
3698 		if (chan->num_conf_rsp == 1)
3699 			return -ECONNREFUSED;
3700 
3701 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3702 				   (unsigned long) &rfc, endptr - ptr);
3703 	}
3704 
3705 	if (result == L2CAP_CONF_SUCCESS) {
3706 		/* Configure output options and let the other side know
3707 		 * which ones we don't like. */
3708 
3709 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3710 			result = L2CAP_CONF_UNACCEPT;
3711 		else {
3712 			chan->omtu = mtu;
3713 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3714 		}
3715 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3716 
3717 		if (remote_efs) {
3718 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3719 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3720 			    efs.stype != chan->local_stype) {
3721 
3722 				result = L2CAP_CONF_UNACCEPT;
3723 
3724 				if (chan->num_conf_req >= 1)
3725 					return -ECONNREFUSED;
3726 
3727 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3728 						   sizeof(efs),
3729 						   (unsigned long) &efs, endptr - ptr);
3730 			} else {
3731 				/* Send PENDING Conf Rsp */
3732 				result = L2CAP_CONF_PENDING;
3733 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3734 			}
3735 		}
3736 
3737 		switch (rfc.mode) {
3738 		case L2CAP_MODE_BASIC:
3739 			chan->fcs = L2CAP_FCS_NONE;
3740 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3741 			break;
3742 
3743 		case L2CAP_MODE_ERTM:
3744 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3745 				chan->remote_tx_win = rfc.txwin_size;
3746 			else
3747 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3748 
3749 			chan->remote_max_tx = rfc.max_transmit;
3750 
3751 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3752 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3753 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3754 			rfc.max_pdu_size = cpu_to_le16(size);
3755 			chan->remote_mps = size;
3756 
3757 			__l2cap_set_ertm_timeouts(chan, &rfc);
3758 
3759 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3760 
3761 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3762 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3763 
3764 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3765 				chan->remote_id = efs.id;
3766 				chan->remote_stype = efs.stype;
3767 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3768 				chan->remote_flush_to =
3769 					le32_to_cpu(efs.flush_to);
3770 				chan->remote_acc_lat =
3771 					le32_to_cpu(efs.acc_lat);
3772 				chan->remote_sdu_itime =
3773 					le32_to_cpu(efs.sdu_itime);
3774 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3775 						   sizeof(efs),
3776 						   (unsigned long) &efs, endptr - ptr);
3777 			}
3778 			break;
3779 
3780 		case L2CAP_MODE_STREAMING:
3781 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3782 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3783 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3784 			rfc.max_pdu_size = cpu_to_le16(size);
3785 			chan->remote_mps = size;
3786 
3787 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3788 
3789 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3790 					   (unsigned long) &rfc, endptr - ptr);
3791 
3792 			break;
3793 
3794 		default:
3795 			result = L2CAP_CONF_UNACCEPT;
3796 
3797 			memset(&rfc, 0, sizeof(rfc));
3798 			rfc.mode = chan->mode;
3799 		}
3800 
3801 		if (result == L2CAP_CONF_SUCCESS)
3802 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3803 	}
3804 	rsp->scid   = cpu_to_le16(chan->dcid);
3805 	rsp->result = cpu_to_le16(result);
3806 	rsp->flags  = cpu_to_le16(0);
3807 
3808 	return ptr - data;
3809 }
3810 
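/* Walk the options in a Configure Response and build an updated Configure
 * Request in 'data', adjusting imtu, flush timeout, RFC and EFS parameters to
 * what the remote will accept.  Returns the length of the new request, or
 * -ECONNREFUSED if the remote insists on an incompatible mode.
 */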
3811 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3812 				void *data, size_t size, u16 *result)
3813 {
3814 	struct l2cap_conf_req *req = data;
3815 	void *ptr = req->data;
3816 	void *endptr = data + size;
3817 	int type, olen;
3818 	unsigned long val;
3819 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3820 	struct l2cap_conf_efs efs;
3821 
3822 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3823 
3824 	while (len >= L2CAP_CONF_OPT_SIZE) {
3825 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3826 		if (len < 0)
3827 			break;
3828 
3829 		switch (type) {
3830 		case L2CAP_CONF_MTU:
3831 			if (olen != 2)
3832 				break;
3833 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3834 				*result = L2CAP_CONF_UNACCEPT;
3835 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3836 			} else
3837 				chan->imtu = val;
3838 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3839 					   endptr - ptr);
3840 			break;
3841 
3842 		case L2CAP_CONF_FLUSH_TO:
3843 			if (olen != 2)
3844 				break;
3845 			chan->flush_to = val;
3846 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3847 					   chan->flush_to, endptr - ptr);
3848 			break;
3849 
3850 		case L2CAP_CONF_RFC:
3851 			if (olen != sizeof(rfc))
3852 				break;
3853 			memcpy(&rfc, (void *)val, olen);
3854 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3855 			    rfc.mode != chan->mode)
3856 				return -ECONNREFUSED;
3857 			chan->fcs = 0;
3858 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3859 					   (unsigned long) &rfc, endptr - ptr);
3860 			break;
3861 
3862 		case L2CAP_CONF_EWS:
3863 			if (olen != 2)
3864 				break;
3865 			chan->ack_win = min_t(u16, val, chan->ack_win);
3866 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3867 					   chan->tx_win, endptr - ptr);
3868 			break;
3869 
3870 		case L2CAP_CONF_EFS:
3871 			if (olen != sizeof(efs))
3872 				break;
3873 			memcpy(&efs, (void *)val, olen);
3874 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3875 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3876 			    efs.stype != chan->local_stype)
3877 				return -ECONNREFUSED;
3878 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3879 					   (unsigned long) &efs, endptr - ptr);
3880 			break;
3881 
3882 		case L2CAP_CONF_FCS:
3883 			if (olen != 1)
3884 				break;
3885 			if (*result == L2CAP_CONF_PENDING)
3886 				if (val == L2CAP_FCS_NONE)
3887 					set_bit(CONF_RECV_NO_FCS,
3888 						&chan->conf_state);
3889 			break;
3890 		}
3891 	}
3892 
3893 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3894 		return -ECONNREFUSED;
3895 
3896 	chan->mode = rfc.mode;
3897 
3898 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3899 		switch (rfc.mode) {
3900 		case L2CAP_MODE_ERTM:
3901 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3902 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3903 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3904 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3905 				chan->ack_win = min_t(u16, chan->ack_win,
3906 						      rfc.txwin_size);
3907 
3908 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3909 				chan->local_msdu = le16_to_cpu(efs.msdu);
3910 				chan->local_sdu_itime =
3911 					le32_to_cpu(efs.sdu_itime);
3912 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3913 				chan->local_flush_to =
3914 					le32_to_cpu(efs.flush_to);
3915 			}
3916 			break;
3917 
3918 		case L2CAP_MODE_STREAMING:
3919 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3920 		}
3921 	}
3922 
3923 	req->dcid   = cpu_to_le16(chan->dcid);
3924 	req->flags  = cpu_to_le16(0);
3925 
3926 	return ptr - data;
3927 }
3928 
3929 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3930 				u16 result, u16 flags)
3931 {
3932 	struct l2cap_conf_rsp *rsp = data;
3933 	void *ptr = rsp->data;
3934 
3935 	BT_DBG("chan %p", chan);
3936 
3937 	rsp->scid   = cpu_to_le16(chan->dcid);
3938 	rsp->result = cpu_to_le16(result);
3939 	rsp->flags  = cpu_to_le16(flags);
3940 
3941 	return ptr - data;
3942 }
3943 
3944 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3945 {
3946 	struct l2cap_le_conn_rsp rsp;
3947 	struct l2cap_conn *conn = chan->conn;
3948 
3949 	BT_DBG("chan %p", chan);
3950 
3951 	rsp.dcid    = cpu_to_le16(chan->scid);
3952 	rsp.mtu     = cpu_to_le16(chan->imtu);
3953 	rsp.mps     = cpu_to_le16(chan->mps);
3954 	rsp.credits = cpu_to_le16(chan->rx_credits);
3955 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3956 
3957 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3958 		       &rsp);
3959 }
3960 
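/* Send a deferred Enhanced Credit Based connection response.  All channels
 * created by the same ECRED request share one ident, so a single response
 * carries the DCIDs of every channel that was still pending with that ident.
 */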
3961 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3962 {
3963 	struct {
3964 		struct l2cap_ecred_conn_rsp rsp;
3965 		__le16 dcid[5];
3966 	} __packed pdu;
3967 	struct l2cap_conn *conn = chan->conn;
3968 	u16 ident = chan->ident;
3969 	int i = 0;
3970 
3971 	if (!ident)
3972 		return;
3973 
3974 	BT_DBG("chan %p ident %d", chan, ident);
3975 
3976 	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3977 	pdu.rsp.mps     = cpu_to_le16(chan->mps);
3978 	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3979 	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3980 
3981 	mutex_lock(&conn->chan_lock);
3982 
3983 	list_for_each_entry(chan, &conn->chan_l, list) {
3984 		if (chan->ident != ident)
3985 			continue;
3986 
3987 		/* Reset ident so only one response is sent */
3988 		chan->ident = 0;
3989 
3990 		/* Include all channels pending with the same ident */
3991 		pdu.dcid[i++] = cpu_to_le16(chan->scid);
3992 	}
3993 
3994 	mutex_unlock(&conn->chan_lock);
3995 
3996 	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
3997 			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
3998 }
3999 
4000 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4001 {
4002 	struct l2cap_conn_rsp rsp;
4003 	struct l2cap_conn *conn = chan->conn;
4004 	u8 buf[128];
4005 	u8 rsp_code;
4006 
4007 	rsp.scid   = cpu_to_le16(chan->dcid);
4008 	rsp.dcid   = cpu_to_le16(chan->scid);
4009 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4010 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4011 
4012 	if (chan->hs_hcon)
4013 		rsp_code = L2CAP_CREATE_CHAN_RSP;
4014 	else
4015 		rsp_code = L2CAP_CONN_RSP;
4016 
4017 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4018 
4019 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
4020 
4021 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4022 		return;
4023 
4024 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4025 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4026 	chan->num_conf_req++;
4027 }
4028 
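/* Extract the negotiated RFC (and extended window size) options from a final
 * Configure Response so that the ERTM/streaming timeouts, MPS and ack window
 * reflect what the remote actually accepted.
 */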
4029 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4030 {
4031 	int type, olen;
4032 	unsigned long val;
4033 	/* Use sane default values in case a misbehaving remote device
4034 	 * did not send an RFC or extended window size option.
4035 	 */
4036 	u16 txwin_ext = chan->ack_win;
4037 	struct l2cap_conf_rfc rfc = {
4038 		.mode = chan->mode,
4039 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4040 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4041 		.max_pdu_size = cpu_to_le16(chan->imtu),
4042 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4043 	};
4044 
4045 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4046 
4047 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4048 		return;
4049 
4050 	while (len >= L2CAP_CONF_OPT_SIZE) {
4051 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4052 		if (len < 0)
4053 			break;
4054 
4055 		switch (type) {
4056 		case L2CAP_CONF_RFC:
4057 			if (olen != sizeof(rfc))
4058 				break;
4059 			memcpy(&rfc, (void *)val, olen);
4060 			break;
4061 		case L2CAP_CONF_EWS:
4062 			if (olen != 2)
4063 				break;
4064 			txwin_ext = val;
4065 			break;
4066 		}
4067 	}
4068 
4069 	switch (rfc.mode) {
4070 	case L2CAP_MODE_ERTM:
4071 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4072 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4073 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
4074 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4075 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4076 		else
4077 			chan->ack_win = min_t(u16, chan->ack_win,
4078 					      rfc.txwin_size);
4079 		break;
4080 	case L2CAP_MODE_STREAMING:
4081 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
4082 	}
4083 }
4084 
4085 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4086 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4087 				    u8 *data)
4088 {
4089 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4090 
4091 	if (cmd_len < sizeof(*rej))
4092 		return -EPROTO;
4093 
4094 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4095 		return 0;
4096 
4097 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4098 	    cmd->ident == conn->info_ident) {
4099 		cancel_delayed_work(&conn->info_timer);
4100 
4101 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4102 		conn->info_ident = 0;
4103 
4104 		l2cap_conn_start(conn);
4105 	}
4106 
4107 	return 0;
4108 }
4109 
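/* Handle an incoming Connection Request (or AMP Create Channel Request):
 * look up a listening channel for the PSM, run the security and CID sanity
 * checks, create the new channel and send the appropriate response, possibly
 * followed by an Information Request and the first Configure Request.
 */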
4110 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4111 					struct l2cap_cmd_hdr *cmd,
4112 					u8 *data, u8 rsp_code, u8 amp_id)
4113 {
4114 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4115 	struct l2cap_conn_rsp rsp;
4116 	struct l2cap_chan *chan = NULL, *pchan;
4117 	int result, status = L2CAP_CS_NO_INFO;
4118 
4119 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4120 	__le16 psm = req->psm;
4121 
4122 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4123 
4124 	/* Check if we have a socket listening on this psm */
4125 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4126 					 &conn->hcon->dst, ACL_LINK);
4127 	if (!pchan) {
4128 		result = L2CAP_CR_BAD_PSM;
4129 		goto sendresp;
4130 	}
4131 
4132 	mutex_lock(&conn->chan_lock);
4133 	l2cap_chan_lock(pchan);
4134 
4135 	/* Check if the ACL is secure enough (if not SDP) */
4136 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4137 	    !hci_conn_check_link_mode(conn->hcon)) {
4138 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4139 		result = L2CAP_CR_SEC_BLOCK;
4140 		goto response;
4141 	}
4142 
4143 	result = L2CAP_CR_NO_MEM;
4144 
4145 	/* Check for valid dynamic CID range (as per Erratum 3253) */
4146 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4147 		result = L2CAP_CR_INVALID_SCID;
4148 		goto response;
4149 	}
4150 
4151 	/* Check if we already have a channel with that dcid */
4152 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
4153 		result = L2CAP_CR_SCID_IN_USE;
4154 		goto response;
4155 	}
4156 
4157 	chan = pchan->ops->new_connection(pchan);
4158 	if (!chan)
4159 		goto response;
4160 
4161 	/* For certain devices (e.g. a HID mouse), support for authentication,
4162 	 * pairing and bonding is optional. For such devices, in order to avoid
4163 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
4164 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4165 	 */
4166 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4167 
4168 	bacpy(&chan->src, &conn->hcon->src);
4169 	bacpy(&chan->dst, &conn->hcon->dst);
4170 	chan->src_type = bdaddr_src_type(conn->hcon);
4171 	chan->dst_type = bdaddr_dst_type(conn->hcon);
4172 	chan->psm  = psm;
4173 	chan->dcid = scid;
4174 	chan->local_amp_id = amp_id;
4175 
4176 	__l2cap_chan_add(conn, chan);
4177 
4178 	dcid = chan->scid;
4179 
4180 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4181 
4182 	chan->ident = cmd->ident;
4183 
4184 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4185 		if (l2cap_chan_check_security(chan, false)) {
4186 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4187 				l2cap_state_change(chan, BT_CONNECT2);
4188 				result = L2CAP_CR_PEND;
4189 				status = L2CAP_CS_AUTHOR_PEND;
4190 				chan->ops->defer(chan);
4191 			} else {
4192 				/* Force pending result for AMP controllers.
4193 				 * The connection will succeed after the
4194 				 * physical link is up.
4195 				 */
4196 				if (amp_id == AMP_ID_BREDR) {
4197 					l2cap_state_change(chan, BT_CONFIG);
4198 					result = L2CAP_CR_SUCCESS;
4199 				} else {
4200 					l2cap_state_change(chan, BT_CONNECT2);
4201 					result = L2CAP_CR_PEND;
4202 				}
4203 				status = L2CAP_CS_NO_INFO;
4204 			}
4205 		} else {
4206 			l2cap_state_change(chan, BT_CONNECT2);
4207 			result = L2CAP_CR_PEND;
4208 			status = L2CAP_CS_AUTHEN_PEND;
4209 		}
4210 	} else {
4211 		l2cap_state_change(chan, BT_CONNECT2);
4212 		result = L2CAP_CR_PEND;
4213 		status = L2CAP_CS_NO_INFO;
4214 	}
4215 
4216 response:
4217 	l2cap_chan_unlock(pchan);
4218 	mutex_unlock(&conn->chan_lock);
4219 	l2cap_chan_put(pchan);
4220 
4221 sendresp:
4222 	rsp.scid   = cpu_to_le16(scid);
4223 	rsp.dcid   = cpu_to_le16(dcid);
4224 	rsp.result = cpu_to_le16(result);
4225 	rsp.status = cpu_to_le16(status);
4226 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4227 
4228 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4229 		struct l2cap_info_req info;
4230 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4231 
4232 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4233 		conn->info_ident = l2cap_get_ident(conn);
4234 
4235 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4236 
4237 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4238 			       sizeof(info), &info);
4239 	}
4240 
4241 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4242 	    result == L2CAP_CR_SUCCESS) {
4243 		u8 buf[128];
4244 		set_bit(CONF_REQ_SENT, &chan->conf_state);
4245 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4246 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4247 		chan->num_conf_req++;
4248 	}
4249 
4250 	return chan;
4251 }
4252 
4253 static int l2cap_connect_req(struct l2cap_conn *conn,
4254 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4255 {
4256 	struct hci_dev *hdev = conn->hcon->hdev;
4257 	struct hci_conn *hcon = conn->hcon;
4258 
4259 	if (cmd_len < sizeof(struct l2cap_conn_req))
4260 		return -EPROTO;
4261 
4262 	hci_dev_lock(hdev);
4263 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4264 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4265 		mgmt_device_connected(hdev, hcon, NULL, 0);
4266 	hci_dev_unlock(hdev);
4267 
4268 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4269 	return 0;
4270 }
4271 
4272 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4273 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4274 				    u8 *data)
4275 {
4276 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4277 	u16 scid, dcid, result, status;
4278 	struct l2cap_chan *chan;
4279 	u8 req[128];
4280 	int err;
4281 
4282 	if (cmd_len < sizeof(*rsp))
4283 		return -EPROTO;
4284 
4285 	scid   = __le16_to_cpu(rsp->scid);
4286 	dcid   = __le16_to_cpu(rsp->dcid);
4287 	result = __le16_to_cpu(rsp->result);
4288 	status = __le16_to_cpu(rsp->status);
4289 
4290 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4291 	       dcid, scid, result, status);
4292 
4293 	mutex_lock(&conn->chan_lock);
4294 
4295 	if (scid) {
4296 		chan = __l2cap_get_chan_by_scid(conn, scid);
4297 		if (!chan) {
4298 			err = -EBADSLT;
4299 			goto unlock;
4300 		}
4301 	} else {
4302 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4303 		if (!chan) {
4304 			err = -EBADSLT;
4305 			goto unlock;
4306 		}
4307 	}
4308 
4309 	err = 0;
4310 
4311 	l2cap_chan_lock(chan);
4312 
4313 	switch (result) {
4314 	case L2CAP_CR_SUCCESS:
4315 		l2cap_state_change(chan, BT_CONFIG);
4316 		chan->ident = 0;
4317 		chan->dcid = dcid;
4318 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4319 
4320 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4321 			break;
4322 
4323 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4324 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4325 		chan->num_conf_req++;
4326 		break;
4327 
4328 	case L2CAP_CR_PEND:
4329 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4330 		break;
4331 
4332 	default:
4333 		l2cap_chan_del(chan, ECONNREFUSED);
4334 		break;
4335 	}
4336 
4337 	l2cap_chan_unlock(chan);
4338 
4339 unlock:
4340 	mutex_unlock(&conn->chan_lock);
4341 
4342 	return err;
4343 }
4344 
4345 static inline void set_default_fcs(struct l2cap_chan *chan)
4346 {
4347 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4348 	 * sides request it.
4349 	 */
4350 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4351 		chan->fcs = L2CAP_FCS_NONE;
4352 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4353 		chan->fcs = L2CAP_FCS_CRC16;
4354 }
4355 
4356 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4357 				    u8 ident, u16 flags)
4358 {
4359 	struct l2cap_conn *conn = chan->conn;
4360 
4361 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4362 	       flags);
4363 
4364 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4365 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4366 
4367 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4368 		       l2cap_build_conf_rsp(chan, data,
4369 					    L2CAP_CONF_SUCCESS, flags), data);
4370 }
4371 
4372 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4373 				   u16 scid, u16 dcid)
4374 {
4375 	struct l2cap_cmd_rej_cid rej;
4376 
4377 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4378 	rej.scid = __cpu_to_le16(scid);
4379 	rej.dcid = __cpu_to_le16(dcid);
4380 
4381 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4382 }
4383 
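/* Handle an incoming Configure Request.  Options may arrive split across
 * several requests with the continuation flag set; they are accumulated in
 * chan->conf_req and only parsed once the final fragment has been received.
 */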
4384 static inline int l2cap_config_req(struct l2cap_conn *conn,
4385 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4386 				   u8 *data)
4387 {
4388 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4389 	u16 dcid, flags;
4390 	u8 rsp[64];
4391 	struct l2cap_chan *chan;
4392 	int len, err = 0;
4393 
4394 	if (cmd_len < sizeof(*req))
4395 		return -EPROTO;
4396 
4397 	dcid  = __le16_to_cpu(req->dcid);
4398 	flags = __le16_to_cpu(req->flags);
4399 
4400 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4401 
4402 	chan = l2cap_get_chan_by_scid(conn, dcid);
4403 	if (!chan) {
4404 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4405 		return 0;
4406 	}
4407 
4408 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4409 	    chan->state != BT_CONNECTED) {
4410 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4411 				       chan->dcid);
4412 		goto unlock;
4413 	}
4414 
4415 	/* Reject if config buffer is too small. */
4416 	len = cmd_len - sizeof(*req);
4417 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4418 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4419 			       l2cap_build_conf_rsp(chan, rsp,
4420 			       L2CAP_CONF_REJECT, flags), rsp);
4421 		goto unlock;
4422 	}
4423 
4424 	/* Store config. */
4425 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4426 	chan->conf_len += len;
4427 
4428 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4429 		/* Incomplete config. Send empty response. */
4430 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4431 			       l2cap_build_conf_rsp(chan, rsp,
4432 			       L2CAP_CONF_SUCCESS, flags), rsp);
4433 		goto unlock;
4434 	}
4435 
4436 	/* Complete config. */
4437 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4438 	if (len < 0) {
4439 		l2cap_send_disconn_req(chan, ECONNRESET);
4440 		goto unlock;
4441 	}
4442 
4443 	chan->ident = cmd->ident;
4444 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4445 	chan->num_conf_rsp++;
4446 
4447 	/* Reset config buffer. */
4448 	chan->conf_len = 0;
4449 
4450 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4451 		goto unlock;
4452 
4453 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4454 		set_default_fcs(chan);
4455 
4456 		if (chan->mode == L2CAP_MODE_ERTM ||
4457 		    chan->mode == L2CAP_MODE_STREAMING)
4458 			err = l2cap_ertm_init(chan);
4459 
4460 		if (err < 0)
4461 			l2cap_send_disconn_req(chan, -err);
4462 		else
4463 			l2cap_chan_ready(chan);
4464 
4465 		goto unlock;
4466 	}
4467 
4468 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4469 		u8 buf[64];
4470 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4471 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4472 		chan->num_conf_req++;
4473 	}
4474 
4475 	/* Got Conf Rsp PENDING from the remote side and assume we sent
4476 	   Conf Rsp PENDING in the code above */
4477 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4478 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4479 
4480 		/* check compatibility */
4481 
4482 		/* Send rsp for BR/EDR channel */
4483 		if (!chan->hs_hcon)
4484 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4485 		else
4486 			chan->ident = cmd->ident;
4487 	}
4488 
4489 unlock:
4490 	l2cap_chan_unlock(chan);
4491 	l2cap_chan_put(chan);
4492 	return err;
4493 }
4494 
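/* Handle an incoming Configure Response.  On success or pending results the
 * negotiated options are applied; on unknown or unacceptable options a
 * corrected Configure Request is sent, up to L2CAP_CONF_MAX_CONF_RSP
 * attempts, after which the channel is disconnected.
 */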
4495 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4496 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4497 				   u8 *data)
4498 {
4499 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4500 	u16 scid, flags, result;
4501 	struct l2cap_chan *chan;
4502 	int len = cmd_len - sizeof(*rsp);
4503 	int err = 0;
4504 
4505 	if (cmd_len < sizeof(*rsp))
4506 		return -EPROTO;
4507 
4508 	scid   = __le16_to_cpu(rsp->scid);
4509 	flags  = __le16_to_cpu(rsp->flags);
4510 	result = __le16_to_cpu(rsp->result);
4511 
4512 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4513 	       result, len);
4514 
4515 	chan = l2cap_get_chan_by_scid(conn, scid);
4516 	if (!chan)
4517 		return 0;
4518 
4519 	switch (result) {
4520 	case L2CAP_CONF_SUCCESS:
4521 		l2cap_conf_rfc_get(chan, rsp->data, len);
4522 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4523 		break;
4524 
4525 	case L2CAP_CONF_PENDING:
4526 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4527 
4528 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4529 			char buf[64];
4530 
4531 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4532 						   buf, sizeof(buf), &result);
4533 			if (len < 0) {
4534 				l2cap_send_disconn_req(chan, ECONNRESET);
4535 				goto done;
4536 			}
4537 
4538 			if (!chan->hs_hcon) {
4539 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4540 							0);
4541 			} else {
4542 				if (l2cap_check_efs(chan)) {
4543 					amp_create_logical_link(chan);
4544 					chan->ident = cmd->ident;
4545 				}
4546 			}
4547 		}
4548 		goto done;
4549 
4550 	case L2CAP_CONF_UNKNOWN:
4551 	case L2CAP_CONF_UNACCEPT:
4552 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4553 			char req[64];
4554 
4555 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4556 				l2cap_send_disconn_req(chan, ECONNRESET);
4557 				goto done;
4558 			}
4559 
4560 			/* throw out any old stored conf requests */
4561 			result = L2CAP_CONF_SUCCESS;
4562 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4563 						   req, sizeof(req), &result);
4564 			if (len < 0) {
4565 				l2cap_send_disconn_req(chan, ECONNRESET);
4566 				goto done;
4567 			}
4568 
4569 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4570 				       L2CAP_CONF_REQ, len, req);
4571 			chan->num_conf_req++;
4572 			if (result != L2CAP_CONF_SUCCESS)
4573 				goto done;
4574 			break;
4575 		}
4576 		fallthrough;
4577 
4578 	default:
4579 		l2cap_chan_set_err(chan, ECONNRESET);
4580 
4581 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4582 		l2cap_send_disconn_req(chan, ECONNRESET);
4583 		goto done;
4584 	}
4585 
4586 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4587 		goto done;
4588 
4589 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4590 
4591 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4592 		set_default_fcs(chan);
4593 
4594 		if (chan->mode == L2CAP_MODE_ERTM ||
4595 		    chan->mode == L2CAP_MODE_STREAMING)
4596 			err = l2cap_ertm_init(chan);
4597 
4598 		if (err < 0)
4599 			l2cap_send_disconn_req(chan, -err);
4600 		else
4601 			l2cap_chan_ready(chan);
4602 	}
4603 
4604 done:
4605 	l2cap_chan_unlock(chan);
4606 	l2cap_chan_put(chan);
4607 	return err;
4608 }
4609 
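/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response and tear the channel down immediately.
 */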
4610 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4611 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4612 				       u8 *data)
4613 {
4614 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4615 	struct l2cap_disconn_rsp rsp;
4616 	u16 dcid, scid;
4617 	struct l2cap_chan *chan;
4618 
4619 	if (cmd_len != sizeof(*req))
4620 		return -EPROTO;
4621 
4622 	scid = __le16_to_cpu(req->scid);
4623 	dcid = __le16_to_cpu(req->dcid);
4624 
4625 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4626 
4627 	mutex_lock(&conn->chan_lock);
4628 
4629 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4630 	if (!chan) {
4631 		mutex_unlock(&conn->chan_lock);
4632 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4633 		return 0;
4634 	}
4635 
4636 	l2cap_chan_hold(chan);
4637 	l2cap_chan_lock(chan);
4638 
4639 	rsp.dcid = cpu_to_le16(chan->scid);
4640 	rsp.scid = cpu_to_le16(chan->dcid);
4641 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4642 
4643 	chan->ops->set_shutdown(chan);
4644 
4645 	l2cap_chan_del(chan, ECONNRESET);
4646 
4647 	chan->ops->close(chan);
4648 
4649 	l2cap_chan_unlock(chan);
4650 	l2cap_chan_put(chan);
4651 
4652 	mutex_unlock(&conn->chan_lock);
4653 
4654 	return 0;
4655 }
4656 
4657 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4658 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4659 				       u8 *data)
4660 {
4661 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4662 	u16 dcid, scid;
4663 	struct l2cap_chan *chan;
4664 
4665 	if (cmd_len != sizeof(*rsp))
4666 		return -EPROTO;
4667 
4668 	scid = __le16_to_cpu(rsp->scid);
4669 	dcid = __le16_to_cpu(rsp->dcid);
4670 
4671 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4672 
4673 	mutex_lock(&conn->chan_lock);
4674 
4675 	chan = __l2cap_get_chan_by_scid(conn, scid);
4676 	if (!chan) {
4677 		mutex_unlock(&conn->chan_lock);
4678 		return 0;
4679 	}
4680 
4681 	l2cap_chan_hold(chan);
4682 	l2cap_chan_lock(chan);
4683 
4684 	if (chan->state != BT_DISCONN) {
4685 		l2cap_chan_unlock(chan);
4686 		l2cap_chan_put(chan);
4687 		mutex_unlock(&conn->chan_lock);
4688 		return 0;
4689 	}
4690 
4691 	l2cap_chan_del(chan, 0);
4692 
4693 	chan->ops->close(chan);
4694 
4695 	l2cap_chan_unlock(chan);
4696 	l2cap_chan_put(chan);
4697 
4698 	mutex_unlock(&conn->chan_lock);
4699 
4700 	return 0;
4701 }
4702 
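/* Handle an incoming Information Request for the feature mask or the fixed
 * channel map; any other type is answered with "not supported".
 */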
4703 static inline int l2cap_information_req(struct l2cap_conn *conn,
4704 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4705 					u8 *data)
4706 {
4707 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4708 	u16 type;
4709 
4710 	if (cmd_len != sizeof(*req))
4711 		return -EPROTO;
4712 
4713 	type = __le16_to_cpu(req->type);
4714 
4715 	BT_DBG("type 0x%4.4x", type);
4716 
4717 	if (type == L2CAP_IT_FEAT_MASK) {
4718 		u8 buf[8];
4719 		u32 feat_mask = l2cap_feat_mask;
4720 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4721 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4722 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4723 		if (!disable_ertm)
4724 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4725 				| L2CAP_FEAT_FCS;
4726 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4727 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4728 				| L2CAP_FEAT_EXT_WINDOW;
4729 
4730 		put_unaligned_le32(feat_mask, rsp->data);
4731 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4732 			       buf);
4733 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4734 		u8 buf[12];
4735 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4736 
4737 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4738 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4739 		rsp->data[0] = conn->local_fixed_chan;
4740 		memset(rsp->data + 1, 0, 7);
4741 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4742 			       buf);
4743 	} else {
4744 		struct l2cap_info_rsp rsp;
4745 		rsp.type   = cpu_to_le16(type);
4746 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4747 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4748 			       &rsp);
4749 	}
4750 
4751 	return 0;
4752 }
4753 
4754 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4755 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4756 					u8 *data)
4757 {
4758 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4759 	u16 type, result;
4760 
4761 	if (cmd_len < sizeof(*rsp))
4762 		return -EPROTO;
4763 
4764 	type   = __le16_to_cpu(rsp->type);
4765 	result = __le16_to_cpu(rsp->result);
4766 
4767 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4768 
4769 	/* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
4770 	if (cmd->ident != conn->info_ident ||
4771 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4772 		return 0;
4773 
4774 	cancel_delayed_work(&conn->info_timer);
4775 
4776 	if (result != L2CAP_IR_SUCCESS) {
4777 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4778 		conn->info_ident = 0;
4779 
4780 		l2cap_conn_start(conn);
4781 
4782 		return 0;
4783 	}
4784 
4785 	switch (type) {
4786 	case L2CAP_IT_FEAT_MASK:
4787 		conn->feat_mask = get_unaligned_le32(rsp->data);
4788 
4789 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4790 			struct l2cap_info_req req;
4791 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4792 
4793 			conn->info_ident = l2cap_get_ident(conn);
4794 
4795 			l2cap_send_cmd(conn, conn->info_ident,
4796 				       L2CAP_INFO_REQ, sizeof(req), &req);
4797 		} else {
4798 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4799 			conn->info_ident = 0;
4800 
4801 			l2cap_conn_start(conn);
4802 		}
4803 		break;
4804 
4805 	case L2CAP_IT_FIXED_CHAN:
4806 		conn->remote_fixed_chan = rsp->data[0];
4807 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4808 		conn->info_ident = 0;
4809 
4810 		l2cap_conn_start(conn);
4811 		break;
4812 	}
4813 
4814 	return 0;
4815 }
4816 
4817 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4818 				    struct l2cap_cmd_hdr *cmd,
4819 				    u16 cmd_len, void *data)
4820 {
4821 	struct l2cap_create_chan_req *req = data;
4822 	struct l2cap_create_chan_rsp rsp;
4823 	struct l2cap_chan *chan;
4824 	struct hci_dev *hdev;
4825 	u16 psm, scid;
4826 
4827 	if (cmd_len != sizeof(*req))
4828 		return -EPROTO;
4829 
4830 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4831 		return -EINVAL;
4832 
4833 	psm = le16_to_cpu(req->psm);
4834 	scid = le16_to_cpu(req->scid);
4835 
4836 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4837 
4838 	/* For controller id 0, make a BR/EDR connection */
4839 	if (req->amp_id == AMP_ID_BREDR) {
4840 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4841 			      req->amp_id);
4842 		return 0;
4843 	}
4844 
4845 	/* Validate AMP controller id */
4846 	hdev = hci_dev_get(req->amp_id);
4847 	if (!hdev)
4848 		goto error;
4849 
4850 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4851 		hci_dev_put(hdev);
4852 		goto error;
4853 	}
4854 
4855 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4856 			     req->amp_id);
4857 	if (chan) {
4858 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4859 		struct hci_conn *hs_hcon;
4860 
4861 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4862 						  &conn->hcon->dst);
4863 		if (!hs_hcon) {
4864 			hci_dev_put(hdev);
4865 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4866 					       chan->dcid);
4867 			return 0;
4868 		}
4869 
4870 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4871 
4872 		mgr->bredr_chan = chan;
4873 		chan->hs_hcon = hs_hcon;
4874 		chan->fcs = L2CAP_FCS_NONE;
4875 		conn->mtu = hdev->block_mtu;
4876 	}
4877 
4878 	hci_dev_put(hdev);
4879 
4880 	return 0;
4881 
4882 error:
4883 	rsp.dcid = 0;
4884 	rsp.scid = cpu_to_le16(scid);
4885 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4886 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4887 
4888 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4889 		       sizeof(rsp), &rsp);
4890 
4891 	return 0;
4892 }
4893 
4894 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4895 {
4896 	struct l2cap_move_chan_req req;
4897 	u8 ident;
4898 
4899 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4900 
4901 	ident = l2cap_get_ident(chan->conn);
4902 	chan->ident = ident;
4903 
4904 	req.icid = cpu_to_le16(chan->scid);
4905 	req.dest_amp_id = dest_amp_id;
4906 
4907 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4908 		       &req);
4909 
4910 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4911 }
4912 
4913 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4914 {
4915 	struct l2cap_move_chan_rsp rsp;
4916 
4917 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4918 
4919 	rsp.icid = cpu_to_le16(chan->dcid);
4920 	rsp.result = cpu_to_le16(result);
4921 
4922 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4923 		       sizeof(rsp), &rsp);
4924 }
4925 
4926 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4927 {
4928 	struct l2cap_move_chan_cfm cfm;
4929 
4930 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4931 
4932 	chan->ident = l2cap_get_ident(chan->conn);
4933 
4934 	cfm.icid = cpu_to_le16(chan->scid);
4935 	cfm.result = cpu_to_le16(result);
4936 
4937 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4938 		       sizeof(cfm), &cfm);
4939 
4940 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4941 }
4942 
4943 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4944 {
4945 	struct l2cap_move_chan_cfm cfm;
4946 
4947 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4948 
4949 	cfm.icid = cpu_to_le16(icid);
4950 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4951 
4952 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4953 		       sizeof(cfm), &cfm);
4954 }
4955 
4956 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4957 					 u16 icid)
4958 {
4959 	struct l2cap_move_chan_cfm_rsp rsp;
4960 
4961 	BT_DBG("icid 0x%4.4x", icid);
4962 
4963 	rsp.icid = cpu_to_le16(icid);
4964 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4965 }
4966 
4967 static void __release_logical_link(struct l2cap_chan *chan)
4968 {
4969 	chan->hs_hchan = NULL;
4970 	chan->hs_hcon = NULL;
4971 
4972 	/* Placeholder - release the logical link */
4973 }
4974 
4975 static void l2cap_logical_fail(struct l2cap_chan *chan)
4976 {
4977 	/* Logical link setup failed */
4978 	if (chan->state != BT_CONNECTED) {
4979 		/* Create channel failure, disconnect */
4980 		l2cap_send_disconn_req(chan, ECONNRESET);
4981 		return;
4982 	}
4983 
4984 	switch (chan->move_role) {
4985 	case L2CAP_MOVE_ROLE_RESPONDER:
4986 		l2cap_move_done(chan);
4987 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4988 		break;
4989 	case L2CAP_MOVE_ROLE_INITIATOR:
4990 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4991 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4992 			/* Remote has only sent pending or
4993 			 * success responses, clean up
4994 			 */
4995 			l2cap_move_done(chan);
4996 		}
4997 
4998 		/* Other amp move states imply that the move
4999 		 * has already aborted
5000 		 */
5001 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5002 		break;
5003 	}
5004 }
5005 
5006 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
5007 					struct hci_chan *hchan)
5008 {
5009 	struct l2cap_conf_rsp rsp;
5010 
5011 	chan->hs_hchan = hchan;
5012 	chan->hs_hcon->l2cap_data = chan->conn;
5013 
5014 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
5015 
5016 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
5017 		int err;
5018 
5019 		set_default_fcs(chan);
5020 
5021 		err = l2cap_ertm_init(chan);
5022 		if (err < 0)
5023 			l2cap_send_disconn_req(chan, -err);
5024 		else
5025 			l2cap_chan_ready(chan);
5026 	}
5027 }
5028 
5029 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5030 				      struct hci_chan *hchan)
5031 {
5032 	chan->hs_hcon = hchan->conn;
5033 	chan->hs_hcon->l2cap_data = chan->conn;
5034 
5035 	BT_DBG("move_state %d", chan->move_state);
5036 
5037 	switch (chan->move_state) {
5038 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5039 		/* Move confirm will be sent after a success
5040 		 * response is received
5041 		 */
5042 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5043 		break;
5044 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
5045 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5046 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5047 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5048 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5049 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5050 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5051 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5052 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5053 		}
5054 		break;
5055 	default:
5056 		/* Move was not in expected state, free the channel */
5057 		__release_logical_link(chan);
5058 
5059 		chan->move_state = L2CAP_MOVE_STABLE;
5060 	}
5061 }
5062 
5063 /* Call with chan locked */
5064 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5065 		       u8 status)
5066 {
5067 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5068 
5069 	if (status) {
5070 		l2cap_logical_fail(chan);
5071 		__release_logical_link(chan);
5072 		return;
5073 	}
5074 
5075 	if (chan->state != BT_CONNECTED) {
5076 		/* Ignore logical link if channel is on BR/EDR */
5077 		if (chan->local_amp_id != AMP_ID_BREDR)
5078 			l2cap_logical_finish_create(chan, hchan);
5079 	} else {
5080 		l2cap_logical_finish_move(chan, hchan);
5081 	}
5082 }
5083 
5084 void l2cap_move_start(struct l2cap_chan *chan)
5085 {
5086 	BT_DBG("chan %p", chan);
5087 
5088 	if (chan->local_amp_id == AMP_ID_BREDR) {
5089 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5090 			return;
5091 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5092 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5093 		/* Placeholder - start physical link setup */
5094 	} else {
5095 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5096 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5097 		chan->move_id = 0;
5098 		l2cap_move_setup(chan);
5099 		l2cap_send_move_chan_req(chan, 0);
5100 	}
5101 }
5102 
5103 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5104 			    u8 local_amp_id, u8 remote_amp_id)
5105 {
5106 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5107 	       local_amp_id, remote_amp_id);
5108 
5109 	chan->fcs = L2CAP_FCS_NONE;
5110 
5111 	/* Outgoing channel on AMP */
5112 	if (chan->state == BT_CONNECT) {
5113 		if (result == L2CAP_CR_SUCCESS) {
5114 			chan->local_amp_id = local_amp_id;
5115 			l2cap_send_create_chan_req(chan, remote_amp_id);
5116 		} else {
5117 			/* Revert to BR/EDR connect */
5118 			l2cap_send_conn_req(chan);
5119 		}
5120 
5121 		return;
5122 	}
5123 
5124 	/* Incoming channel on AMP */
5125 	if (__l2cap_no_conn_pending(chan)) {
5126 		struct l2cap_conn_rsp rsp;
5127 		char buf[128];
5128 		rsp.scid = cpu_to_le16(chan->dcid);
5129 		rsp.dcid = cpu_to_le16(chan->scid);
5130 
5131 		if (result == L2CAP_CR_SUCCESS) {
5132 			/* Send successful response */
5133 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5134 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5135 		} else {
5136 			/* Send negative response */
5137 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5138 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5139 		}
5140 
5141 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
5142 			       sizeof(rsp), &rsp);
5143 
5144 		if (result == L2CAP_CR_SUCCESS) {
5145 			l2cap_state_change(chan, BT_CONFIG);
5146 			set_bit(CONF_REQ_SENT, &chan->conf_state);
5147 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5148 				       L2CAP_CONF_REQ,
5149 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5150 			chan->num_conf_req++;
5151 		}
5152 	}
5153 }
5154 
5155 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5156 				   u8 remote_amp_id)
5157 {
5158 	l2cap_move_setup(chan);
5159 	chan->move_id = local_amp_id;
5160 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5161 
5162 	l2cap_send_move_chan_req(chan, remote_amp_id);
5163 }
5164 
5165 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5166 {
5167 	struct hci_chan *hchan = NULL;
5168 
5169 	/* Placeholder - get hci_chan for logical link */
5170 
5171 	if (hchan) {
5172 		if (hchan->state == BT_CONNECTED) {
5173 			/* Logical link is ready to go */
5174 			chan->hs_hcon = hchan->conn;
5175 			chan->hs_hcon->l2cap_data = chan->conn;
5176 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5177 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5178 
5179 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5180 		} else {
5181 			/* Wait for logical link to be ready */
5182 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5183 		}
5184 	} else {
5185 		/* Logical link not available */
5186 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5187 	}
5188 }
5189 
5190 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5191 {
5192 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5193 		u8 rsp_result;
5194 		if (result == -EINVAL)
5195 			rsp_result = L2CAP_MR_BAD_ID;
5196 		else
5197 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5198 
5199 		l2cap_send_move_chan_rsp(chan, rsp_result);
5200 	}
5201 
5202 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5203 	chan->move_state = L2CAP_MOVE_STABLE;
5204 
5205 	/* Restart data transmission */
5206 	l2cap_ertm_send(chan);
5207 }
5208 
5209 /* Invoke with locked chan */
5210 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5211 {
5212 	u8 local_amp_id = chan->local_amp_id;
5213 	u8 remote_amp_id = chan->remote_amp_id;
5214 
5215 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5216 	       chan, result, local_amp_id, remote_amp_id);
5217 
5218 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5219 		return;
5220 
5221 	if (chan->state != BT_CONNECTED) {
5222 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5223 	} else if (result != L2CAP_MR_SUCCESS) {
5224 		l2cap_do_move_cancel(chan, result);
5225 	} else {
5226 		switch (chan->move_role) {
5227 		case L2CAP_MOVE_ROLE_INITIATOR:
5228 			l2cap_do_move_initiate(chan, local_amp_id,
5229 					       remote_amp_id);
5230 			break;
5231 		case L2CAP_MOVE_ROLE_RESPONDER:
5232 			l2cap_do_move_respond(chan, result);
5233 			break;
5234 		default:
5235 			l2cap_do_move_cancel(chan, result);
5236 			break;
5237 		}
5238 	}
5239 }
5240 
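/* Handle an AMP Move Channel Request.  The request is refused for channels
 * that cannot move (non-dynamic CID, BR/EDR-only policy, or a mode other than
 * ERTM/streaming), for an unknown or identical destination controller, and
 * when a move collision is lost; otherwise this side becomes the move
 * responder.
 */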
5241 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5242 					 struct l2cap_cmd_hdr *cmd,
5243 					 u16 cmd_len, void *data)
5244 {
5245 	struct l2cap_move_chan_req *req = data;
5246 	struct l2cap_move_chan_rsp rsp;
5247 	struct l2cap_chan *chan;
5248 	u16 icid = 0;
5249 	u16 result = L2CAP_MR_NOT_ALLOWED;
5250 
5251 	if (cmd_len != sizeof(*req))
5252 		return -EPROTO;
5253 
5254 	icid = le16_to_cpu(req->icid);
5255 
5256 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5257 
5258 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5259 		return -EINVAL;
5260 
5261 	chan = l2cap_get_chan_by_dcid(conn, icid);
5262 	if (!chan) {
5263 		rsp.icid = cpu_to_le16(icid);
5264 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5265 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5266 			       sizeof(rsp), &rsp);
5267 		return 0;
5268 	}
5269 
5270 	chan->ident = cmd->ident;
5271 
5272 	if (chan->scid < L2CAP_CID_DYN_START ||
5273 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5274 	    (chan->mode != L2CAP_MODE_ERTM &&
5275 	     chan->mode != L2CAP_MODE_STREAMING)) {
5276 		result = L2CAP_MR_NOT_ALLOWED;
5277 		goto send_move_response;
5278 	}
5279 
5280 	if (chan->local_amp_id == req->dest_amp_id) {
5281 		result = L2CAP_MR_SAME_ID;
5282 		goto send_move_response;
5283 	}
5284 
5285 	if (req->dest_amp_id != AMP_ID_BREDR) {
5286 		struct hci_dev *hdev;
5287 		hdev = hci_dev_get(req->dest_amp_id);
5288 		if (!hdev || hdev->dev_type != HCI_AMP ||
5289 		    !test_bit(HCI_UP, &hdev->flags)) {
5290 			if (hdev)
5291 				hci_dev_put(hdev);
5292 
5293 			result = L2CAP_MR_BAD_ID;
5294 			goto send_move_response;
5295 		}
5296 		hci_dev_put(hdev);
5297 	}
5298 
5299 	/* Detect a move collision.  Only send a collision response
5300 	 * if this side has "lost", otherwise proceed with the move.
5301 	 * The winner has the larger bd_addr.
5302 	 */
5303 	if ((__chan_is_moving(chan) ||
5304 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5305 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5306 		result = L2CAP_MR_COLLISION;
5307 		goto send_move_response;
5308 	}
5309 
5310 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5311 	l2cap_move_setup(chan);
5312 	chan->move_id = req->dest_amp_id;
5313 
5314 	if (req->dest_amp_id == AMP_ID_BREDR) {
5315 		/* Moving to BR/EDR */
5316 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5317 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5318 			result = L2CAP_MR_PEND;
5319 		} else {
5320 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5321 			result = L2CAP_MR_SUCCESS;
5322 		}
5323 	} else {
5324 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5325 		/* Placeholder - uncomment when amp functions are available */
5326 		/*amp_accept_physical(chan, req->dest_amp_id);*/
5327 		result = L2CAP_MR_PEND;
5328 	}
5329 
5330 send_move_response:
5331 	l2cap_send_move_chan_rsp(chan, result);
5332 
5333 	l2cap_chan_unlock(chan);
5334 	l2cap_chan_put(chan);
5335 
5336 	return 0;
5337 }
5338 
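/* Continue a channel move after a successful or pending Move Channel
 * Response, advancing the move state machine and sending the move
 * confirmation once the logical link is ready.
 */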
5339 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5340 {
5341 	struct l2cap_chan *chan;
5342 	struct hci_chan *hchan = NULL;
5343 
5344 	chan = l2cap_get_chan_by_scid(conn, icid);
5345 	if (!chan) {
5346 		l2cap_send_move_chan_cfm_icid(conn, icid);
5347 		return;
5348 	}
5349 
5350 	__clear_chan_timer(chan);
5351 	if (result == L2CAP_MR_PEND)
5352 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5353 
5354 	switch (chan->move_state) {
5355 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5356 		/* Move confirm will be sent when logical link
5357 		 * is complete.
5358 		 */
5359 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5360 		break;
5361 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5362 		if (result == L2CAP_MR_PEND) {
5363 			break;
5364 		} else if (test_bit(CONN_LOCAL_BUSY,
5365 				    &chan->conn_state)) {
5366 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5367 		} else {
5368 			/* Logical link is up or moving to BR/EDR,
5369 			 * proceed with move
5370 			 */
5371 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5372 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5373 		}
5374 		break;
5375 	case L2CAP_MOVE_WAIT_RSP:
5376 		/* Moving to AMP */
5377 		if (result == L2CAP_MR_SUCCESS) {
5378 			/* Remote is ready, send confirm immediately
5379 			 * after logical link is ready
5380 			 */
5381 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5382 		} else {
5383 			/* Both logical link and move success
5384 			 * are required to confirm
5385 			 */
5386 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5387 		}
5388 
5389 		/* Placeholder - get hci_chan for logical link */
5390 		if (!hchan) {
5391 			/* Logical link not available */
5392 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5393 			break;
5394 		}
5395 
5396 		/* If the logical link is not yet connected, do not
5397 		 * send confirmation.
5398 		 */
5399 		if (hchan->state != BT_CONNECTED)
5400 			break;
5401 
5402 		/* Logical link is already ready to go */
5403 
5404 		chan->hs_hcon = hchan->conn;
5405 		chan->hs_hcon->l2cap_data = chan->conn;
5406 
5407 		if (result == L2CAP_MR_SUCCESS) {
5408 			/* Can confirm now */
5409 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5410 		} else {
5411 			/* Now only need move success
5412 			 * to confirm
5413 			 */
5414 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5415 		}
5416 
5417 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5418 		break;
5419 	default:
5420 		/* Any other amp move state means the move failed. */
5421 		chan->move_id = chan->local_amp_id;
5422 		l2cap_move_done(chan);
5423 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5424 	}
5425 
5426 	l2cap_chan_unlock(chan);
5427 	l2cap_chan_put(chan);
5428 }
5429 
5430 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5431 			    u16 result)
5432 {
5433 	struct l2cap_chan *chan;
5434 
5435 	chan = l2cap_get_chan_by_ident(conn, ident);
5436 	if (!chan) {
5437 		/* Could not locate channel, icid is best guess */
5438 		l2cap_send_move_chan_cfm_icid(conn, icid);
5439 		return;
5440 	}
5441 
5442 	__clear_chan_timer(chan);
5443 
5444 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5445 		if (result == L2CAP_MR_COLLISION) {
5446 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5447 		} else {
5448 			/* Cleanup - cancel move */
5449 			chan->move_id = chan->local_amp_id;
5450 			l2cap_move_done(chan);
5451 		}
5452 	}
5453 
5454 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5455 
5456 	l2cap_chan_unlock(chan);
5457 	l2cap_chan_put(chan);
5458 }
5459 
5460 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5461 				  struct l2cap_cmd_hdr *cmd,
5462 				  u16 cmd_len, void *data)
5463 {
5464 	struct l2cap_move_chan_rsp *rsp = data;
5465 	u16 icid, result;
5466 
5467 	if (cmd_len != sizeof(*rsp))
5468 		return -EPROTO;
5469 
5470 	icid = le16_to_cpu(rsp->icid);
5471 	result = le16_to_cpu(rsp->result);
5472 
5473 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5474 
5475 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5476 		l2cap_move_continue(conn, icid, result);
5477 	else
5478 		l2cap_move_fail(conn, cmd->ident, icid, result);
5479 
5480 	return 0;
5481 }
5482 
5483 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5484 				      struct l2cap_cmd_hdr *cmd,
5485 				      u16 cmd_len, void *data)
5486 {
5487 	struct l2cap_move_chan_cfm *cfm = data;
5488 	struct l2cap_chan *chan;
5489 	u16 icid, result;
5490 
5491 	if (cmd_len != sizeof(*cfm))
5492 		return -EPROTO;
5493 
5494 	icid = le16_to_cpu(cfm->icid);
5495 	result = le16_to_cpu(cfm->result);
5496 
5497 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5498 
5499 	chan = l2cap_get_chan_by_dcid(conn, icid);
5500 	if (!chan) {
5501 		/* Spec requires a response even if the icid was not found */
5502 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5503 		return 0;
5504 	}
5505 
5506 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5507 		if (result == L2CAP_MC_CONFIRMED) {
5508 			chan->local_amp_id = chan->move_id;
5509 			if (chan->local_amp_id == AMP_ID_BREDR)
5510 				__release_logical_link(chan);
5511 		} else {
5512 			chan->move_id = chan->local_amp_id;
5513 		}
5514 
5515 		l2cap_move_done(chan);
5516 	}
5517 
5518 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5519 
5520 	l2cap_chan_unlock(chan);
5521 	l2cap_chan_put(chan);
5522 
5523 	return 0;
5524 }
5525 
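/* Handle a Move Channel Confirm Response and complete the move if the
 * channel was waiting for it.
 */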
5526 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5527 						 struct l2cap_cmd_hdr *cmd,
5528 						 u16 cmd_len, void *data)
5529 {
5530 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5531 	struct l2cap_chan *chan;
5532 	u16 icid;
5533 
5534 	if (cmd_len != sizeof(*rsp))
5535 		return -EPROTO;
5536 
5537 	icid = le16_to_cpu(rsp->icid);
5538 
5539 	BT_DBG("icid 0x%4.4x", icid);
5540 
5541 	chan = l2cap_get_chan_by_scid(conn, icid);
5542 	if (!chan)
5543 		return 0;
5544 
5545 	__clear_chan_timer(chan);
5546 
5547 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5548 		chan->local_amp_id = chan->move_id;
5549 
5550 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5551 			__release_logical_link(chan);
5552 
5553 		l2cap_move_done(chan);
5554 	}
5555 
5556 	l2cap_chan_unlock(chan);
5557 	l2cap_chan_put(chan);
5558 
5559 	return 0;
5560 }
5561 
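/* Handle an LE Connection Parameter Update Request.  Only valid when
 * the local device is central: the parameters are validated, a response
 * is sent, and accepted values are applied to the controller and
 * reported to mgmt.
 */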
5562 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5563 					      struct l2cap_cmd_hdr *cmd,
5564 					      u16 cmd_len, u8 *data)
5565 {
5566 	struct hci_conn *hcon = conn->hcon;
5567 	struct l2cap_conn_param_update_req *req;
5568 	struct l2cap_conn_param_update_rsp rsp;
5569 	u16 min, max, latency, to_multiplier;
5570 	int err;
5571 
5572 	if (hcon->role != HCI_ROLE_MASTER)
5573 		return -EINVAL;
5574 
5575 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5576 		return -EPROTO;
5577 
5578 	req = (struct l2cap_conn_param_update_req *) data;
5579 	min		= __le16_to_cpu(req->min);
5580 	max		= __le16_to_cpu(req->max);
5581 	latency		= __le16_to_cpu(req->latency);
5582 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5583 
5584 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5585 	       min, max, latency, to_multiplier);
5586 
5587 	memset(&rsp, 0, sizeof(rsp));
5588 
5589 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5590 	if (err)
5591 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5592 	else
5593 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5594 
5595 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5596 		       sizeof(rsp), &rsp);
5597 
5598 	if (!err) {
5599 		u8 store_hint;
5600 
5601 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5602 						to_multiplier);
5603 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5604 				    store_hint, min, max, latency,
5605 				    to_multiplier);
5606 
5607 	}
5608 
5609 	return 0;
5610 }
5611 
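/* Handle an LE Credit Based Connection Response.  On success the channel
 * becomes ready; on an authentication or encryption error the required
 * security level is raised so a new Connect Request can be sent once
 * pairing completes.
 */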
5612 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5613 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5614 				u8 *data)
5615 {
5616 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5617 	struct hci_conn *hcon = conn->hcon;
5618 	u16 dcid, mtu, mps, credits, result;
5619 	struct l2cap_chan *chan;
5620 	int err, sec_level;
5621 
5622 	if (cmd_len < sizeof(*rsp))
5623 		return -EPROTO;
5624 
5625 	dcid    = __le16_to_cpu(rsp->dcid);
5626 	mtu     = __le16_to_cpu(rsp->mtu);
5627 	mps     = __le16_to_cpu(rsp->mps);
5628 	credits = __le16_to_cpu(rsp->credits);
5629 	result  = __le16_to_cpu(rsp->result);
5630 
5631 	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5632 					   dcid < L2CAP_CID_DYN_START ||
5633 					   dcid > L2CAP_CID_LE_DYN_END))
5634 		return -EPROTO;
5635 
5636 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5637 	       dcid, mtu, mps, credits, result);
5638 
5639 	mutex_lock(&conn->chan_lock);
5640 
5641 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5642 	if (!chan) {
5643 		err = -EBADSLT;
5644 		goto unlock;
5645 	}
5646 
5647 	err = 0;
5648 
5649 	l2cap_chan_lock(chan);
5650 
5651 	switch (result) {
5652 	case L2CAP_CR_LE_SUCCESS:
5653 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5654 			err = -EBADSLT;
5655 			break;
5656 		}
5657 
5658 		chan->ident = 0;
5659 		chan->dcid = dcid;
5660 		chan->omtu = mtu;
5661 		chan->remote_mps = mps;
5662 		chan->tx_credits = credits;
5663 		l2cap_chan_ready(chan);
5664 		break;
5665 
5666 	case L2CAP_CR_LE_AUTHENTICATION:
5667 	case L2CAP_CR_LE_ENCRYPTION:
5668 		/* If we already have MITM protection we can't do
5669 		 * anything.
5670 		 */
5671 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5672 			l2cap_chan_del(chan, ECONNREFUSED);
5673 			break;
5674 		}
5675 
5676 		sec_level = hcon->sec_level + 1;
5677 		if (chan->sec_level < sec_level)
5678 			chan->sec_level = sec_level;
5679 
5680 		/* We'll need to send a new Connect Request */
5681 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5682 
5683 		smp_conn_security(hcon, chan->sec_level);
5684 		break;
5685 
5686 	default:
5687 		l2cap_chan_del(chan, ECONNREFUSED);
5688 		break;
5689 	}
5690 
5691 	l2cap_chan_unlock(chan);
5692 
5693 unlock:
5694 	mutex_unlock(&conn->chan_lock);
5695 
5696 	return err;
5697 }
5698 
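/* Dispatch a single BR/EDR signaling command to its handler. */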
5699 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5700 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5701 				      u8 *data)
5702 {
5703 	int err = 0;
5704 
5705 	switch (cmd->code) {
5706 	case L2CAP_COMMAND_REJ:
5707 		l2cap_command_rej(conn, cmd, cmd_len, data);
5708 		break;
5709 
5710 	case L2CAP_CONN_REQ:
5711 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5712 		break;
5713 
5714 	case L2CAP_CONN_RSP:
5715 	case L2CAP_CREATE_CHAN_RSP:
5716 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5717 		break;
5718 
5719 	case L2CAP_CONF_REQ:
5720 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5721 		break;
5722 
5723 	case L2CAP_CONF_RSP:
5724 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5725 		break;
5726 
5727 	case L2CAP_DISCONN_REQ:
5728 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5729 		break;
5730 
5731 	case L2CAP_DISCONN_RSP:
5732 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5733 		break;
5734 
5735 	case L2CAP_ECHO_REQ:
5736 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5737 		break;
5738 
5739 	case L2CAP_ECHO_RSP:
5740 		break;
5741 
5742 	case L2CAP_INFO_REQ:
5743 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5744 		break;
5745 
5746 	case L2CAP_INFO_RSP:
5747 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5748 		break;
5749 
5750 	case L2CAP_CREATE_CHAN_REQ:
5751 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5752 		break;
5753 
5754 	case L2CAP_MOVE_CHAN_REQ:
5755 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5756 		break;
5757 
5758 	case L2CAP_MOVE_CHAN_RSP:
5759 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5760 		break;
5761 
5762 	case L2CAP_MOVE_CHAN_CFM:
5763 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5764 		break;
5765 
5766 	case L2CAP_MOVE_CHAN_CFM_RSP:
5767 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5768 		break;
5769 
5770 	default:
5771 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5772 		err = -EINVAL;
5773 		break;
5774 	}
5775 
5776 	return err;
5777 }
5778 
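/* Handle an LE Credit Based Connection Request: look up a listening
 * channel for the PSM, validate security and the requested source CID,
 * create the new channel and respond (or defer the response if the
 * socket uses deferred setup).
 */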
5779 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5780 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5781 				u8 *data)
5782 {
5783 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5784 	struct l2cap_le_conn_rsp rsp;
5785 	struct l2cap_chan *chan, *pchan;
5786 	u16 dcid, scid, credits, mtu, mps;
5787 	__le16 psm;
5788 	u8 result;
5789 
5790 	if (cmd_len != sizeof(*req))
5791 		return -EPROTO;
5792 
5793 	scid = __le16_to_cpu(req->scid);
5794 	mtu  = __le16_to_cpu(req->mtu);
5795 	mps  = __le16_to_cpu(req->mps);
5796 	psm  = req->psm;
5797 	dcid = 0;
5798 	credits = 0;
5799 
5800 	if (mtu < 23 || mps < 23)
5801 		return -EPROTO;
5802 
5803 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5804 	       scid, mtu, mps);
5805 
5806 	/* Check if we have socket listening on psm */
5807 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5808 					 &conn->hcon->dst, LE_LINK);
5809 	if (!pchan) {
5810 		result = L2CAP_CR_LE_BAD_PSM;
5811 		chan = NULL;
5812 		goto response;
5813 	}
5814 
5815 	mutex_lock(&conn->chan_lock);
5816 	l2cap_chan_lock(pchan);
5817 
5818 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5819 				     SMP_ALLOW_STK)) {
5820 		result = L2CAP_CR_LE_AUTHENTICATION;
5821 		chan = NULL;
5822 		goto response_unlock;
5823 	}
5824 
5825 	/* Check for valid dynamic CID range */
5826 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5827 		result = L2CAP_CR_LE_INVALID_SCID;
5828 		chan = NULL;
5829 		goto response_unlock;
5830 	}
5831 
5832 	/* Check if we already have channel with that dcid */
5833 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5834 		result = L2CAP_CR_LE_SCID_IN_USE;
5835 		chan = NULL;
5836 		goto response_unlock;
5837 	}
5838 
5839 	chan = pchan->ops->new_connection(pchan);
5840 	if (!chan) {
5841 		result = L2CAP_CR_LE_NO_MEM;
5842 		goto response_unlock;
5843 	}
5844 
5845 	bacpy(&chan->src, &conn->hcon->src);
5846 	bacpy(&chan->dst, &conn->hcon->dst);
5847 	chan->src_type = bdaddr_src_type(conn->hcon);
5848 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5849 	chan->psm  = psm;
5850 	chan->dcid = scid;
5851 	chan->omtu = mtu;
5852 	chan->remote_mps = mps;
5853 
5854 	__l2cap_chan_add(conn, chan);
5855 
5856 	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5857 
5858 	dcid = chan->scid;
5859 	credits = chan->rx_credits;
5860 
5861 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5862 
5863 	chan->ident = cmd->ident;
5864 
5865 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5866 		l2cap_state_change(chan, BT_CONNECT2);
5867 		/* The following result value is actually not defined
5868 		 * for LE CoC but we use it to let the function know
5869 		 * that it should bail out after doing its cleanup
5870 		 * instead of sending a response.
5871 		 */
5872 		result = L2CAP_CR_PEND;
5873 		chan->ops->defer(chan);
5874 	} else {
5875 		l2cap_chan_ready(chan);
5876 		result = L2CAP_CR_LE_SUCCESS;
5877 	}
5878 
5879 response_unlock:
5880 	l2cap_chan_unlock(pchan);
5881 	mutex_unlock(&conn->chan_lock);
5882 	l2cap_chan_put(pchan);
5883 
5884 	if (result == L2CAP_CR_PEND)
5885 		return 0;
5886 
5887 response:
5888 	if (chan) {
5889 		rsp.mtu = cpu_to_le16(chan->imtu);
5890 		rsp.mps = cpu_to_le16(chan->mps);
5891 	} else {
5892 		rsp.mtu = 0;
5893 		rsp.mps = 0;
5894 	}
5895 
5896 	rsp.dcid    = cpu_to_le16(dcid);
5897 	rsp.credits = cpu_to_le16(credits);
5898 	rsp.result  = cpu_to_le16(result);
5899 
5900 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5901 
5902 	return 0;
5903 }
5904 
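/* Handle an LE Flow Control Credit packet: add the credits to the
 * channel (disconnecting on overflow past LE_FLOWCTL_MAX_CREDITS) and
 * resume transmission that was blocked waiting for credits.
 */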
5905 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5906 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5907 				   u8 *data)
5908 {
5909 	struct l2cap_le_credits *pkt;
5910 	struct l2cap_chan *chan;
5911 	u16 cid, credits, max_credits;
5912 
5913 	if (cmd_len != sizeof(*pkt))
5914 		return -EPROTO;
5915 
5916 	pkt = (struct l2cap_le_credits *) data;
5917 	cid	= __le16_to_cpu(pkt->cid);
5918 	credits	= __le16_to_cpu(pkt->credits);
5919 
5920 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5921 
5922 	chan = l2cap_get_chan_by_dcid(conn, cid);
5923 	if (!chan)
5924 		return -EBADSLT;
5925 
5926 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5927 	if (credits > max_credits) {
5928 		BT_ERR("LE credits overflow");
5929 		l2cap_send_disconn_req(chan, ECONNRESET);
5930 
5931 		/* Return 0 so that we don't trigger an unnecessary
5932 		 * command reject packet.
5933 		 */
5934 		goto unlock;
5935 	}
5936 
5937 	chan->tx_credits += credits;
5938 
5939 	/* Resume sending */
5940 	l2cap_le_flowctl_send(chan);
5941 
5942 	if (chan->tx_credits)
5943 		chan->ops->resume(chan);
5944 
5945 unlock:
5946 	l2cap_chan_unlock(chan);
5947 	l2cap_chan_put(chan);
5948 
5949 	return 0;
5950 }
5951 
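/* Handle an Enhanced Credit Based Connection Request, which may open up
 * to L2CAP_ECRED_MAX_CID channels at once.  Each requested source CID is
 * validated individually; a single response carries the result and the
 * list of allocated destination CIDs.
 */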
5952 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5953 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5954 				       u8 *data)
5955 {
5956 	struct l2cap_ecred_conn_req *req = (void *) data;
5957 	struct {
5958 		struct l2cap_ecred_conn_rsp rsp;
5959 		__le16 dcid[L2CAP_ECRED_MAX_CID];
5960 	} __packed pdu;
5961 	struct l2cap_chan *chan, *pchan;
5962 	u16 mtu, mps;
5963 	__le16 psm;
5964 	u8 result, len = 0;
5965 	int i, num_scid;
5966 	bool defer = false;
5967 
5968 	if (!enable_ecred)
5969 		return -EINVAL;
5970 	memset(&pdu, 0, sizeof(pdu));
5971 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5972 		result = L2CAP_CR_LE_INVALID_PARAMS;
5973 		goto response;
5974 	}
5975 
5976 	cmd_len -= sizeof(*req);
5977 	num_scid = cmd_len / sizeof(u16);
5978 
5979 	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
5980 		result = L2CAP_CR_LE_INVALID_PARAMS;
5981 		goto response;
5982 	}
5983 
5984 	mtu  = __le16_to_cpu(req->mtu);
5985 	mps  = __le16_to_cpu(req->mps);
5986 
5987 	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
5988 		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
5989 		goto response;
5990 	}
5991 
5992 	psm  = req->psm;
5993 
5994 	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
5995 
5996 
5997 
5998 	/* Check if we have socket listening on psm */
5999 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
6000 					 &conn->hcon->dst, LE_LINK);
6001 	if (!pchan) {
6002 		result = L2CAP_CR_LE_BAD_PSM;
6003 		goto response;
6004 	}
6005 
6006 	mutex_lock(&conn->chan_lock);
6007 	l2cap_chan_lock(pchan);
6008 
6009 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
6010 				     SMP_ALLOW_STK)) {
6011 		result = L2CAP_CR_LE_AUTHENTICATION;
6012 		goto unlock;
6013 	}
6014 
6015 	result = L2CAP_CR_LE_SUCCESS;
6016 
6017 	for (i = 0; i < num_scid; i++) {
6018 		u16 scid = __le16_to_cpu(req->scid[i]);
6019 
6020 		BT_DBG("scid[%d] 0x%4.4x", i, scid);
6021 
6022 		pdu.dcid[i] = 0x0000;
6023 		len += sizeof(*pdu.dcid);
6024 
6025 		/* Check for valid dynamic CID range */
6026 		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
6027 			result = L2CAP_CR_LE_INVALID_SCID;
6028 			continue;
6029 		}
6030 
6031 		/* Check if we already have channel with that dcid */
6032 		if (__l2cap_get_chan_by_dcid(conn, scid)) {
6033 			result = L2CAP_CR_LE_SCID_IN_USE;
6034 			continue;
6035 		}
6036 
6037 		chan = pchan->ops->new_connection(pchan);
6038 		if (!chan) {
6039 			result = L2CAP_CR_LE_NO_MEM;
6040 			continue;
6041 		}
6042 
6043 		bacpy(&chan->src, &conn->hcon->src);
6044 		bacpy(&chan->dst, &conn->hcon->dst);
6045 		chan->src_type = bdaddr_src_type(conn->hcon);
6046 		chan->dst_type = bdaddr_dst_type(conn->hcon);
6047 		chan->psm  = psm;
6048 		chan->dcid = scid;
6049 		chan->omtu = mtu;
6050 		chan->remote_mps = mps;
6051 
6052 		__l2cap_chan_add(conn, chan);
6053 
6054 		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
6055 
6056 		/* Init response */
6057 		if (!pdu.rsp.credits) {
6058 			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6059 			pdu.rsp.mps = cpu_to_le16(chan->mps);
6060 			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6061 		}
6062 
6063 		pdu.dcid[i] = cpu_to_le16(chan->scid);
6064 
6065 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6066 
6067 		chan->ident = cmd->ident;
6068 
6069 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6070 			l2cap_state_change(chan, BT_CONNECT2);
6071 			defer = true;
6072 			chan->ops->defer(chan);
6073 		} else {
6074 			l2cap_chan_ready(chan);
6075 		}
6076 	}
6077 
6078 unlock:
6079 	l2cap_chan_unlock(pchan);
6080 	mutex_unlock(&conn->chan_lock);
6081 	l2cap_chan_put(pchan);
6082 
6083 response:
6084 	pdu.rsp.result = cpu_to_le16(result);
6085 
6086 	if (defer)
6087 		return 0;
6088 
6089 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6090 		       sizeof(pdu.rsp) + len, &pdu);
6091 
6092 	return 0;
6093 }
6094 
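/* Handle an Enhanced Credit Based Connection Response by matching the
 * pending channels created for this request identifier against the
 * returned destination CIDs; channels that were refused or that
 * reference an already used DCID are deleted.
 */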
6095 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6096 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6097 				       u8 *data)
6098 {
6099 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6100 	struct hci_conn *hcon = conn->hcon;
6101 	u16 mtu, mps, credits, result;
6102 	struct l2cap_chan *chan, *tmp;
6103 	int err = 0, sec_level;
6104 	int i = 0;
6105 
6106 	if (cmd_len < sizeof(*rsp))
6107 		return -EPROTO;
6108 
6109 	mtu     = __le16_to_cpu(rsp->mtu);
6110 	mps     = __le16_to_cpu(rsp->mps);
6111 	credits = __le16_to_cpu(rsp->credits);
6112 	result  = __le16_to_cpu(rsp->result);
6113 
6114 	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6115 	       result);
6116 
6117 	mutex_lock(&conn->chan_lock);
6118 
6119 	cmd_len -= sizeof(*rsp);
6120 
6121 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6122 		u16 dcid;
6123 
6124 		if (chan->ident != cmd->ident ||
6125 		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6126 		    chan->state == BT_CONNECTED)
6127 			continue;
6128 
6129 		l2cap_chan_lock(chan);
6130 
6131 		/* Check that there is a dcid for each pending channel */
6132 		if (cmd_len < sizeof(dcid)) {
6133 			l2cap_chan_del(chan, ECONNREFUSED);
6134 			l2cap_chan_unlock(chan);
6135 			continue;
6136 		}
6137 
6138 		dcid = __le16_to_cpu(rsp->dcid[i++]);
6139 		cmd_len -= sizeof(u16);
6140 
6141 		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6142 
6143 		/* Check if dcid is already in use */
6144 		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6145 			/* If a device receives a
6146 			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6147 			 * already-assigned Destination CID, then both the
6148 			 * original channel and the new channel shall be
6149 			 * immediately discarded and not used.
6150 			 */
6151 			l2cap_chan_del(chan, ECONNREFUSED);
6152 			l2cap_chan_unlock(chan);
6153 			chan = __l2cap_get_chan_by_dcid(conn, dcid);
6154 			l2cap_chan_lock(chan);
6155 			l2cap_chan_del(chan, ECONNRESET);
6156 			l2cap_chan_unlock(chan);
6157 			continue;
6158 		}
6159 
6160 		switch (result) {
6161 		case L2CAP_CR_LE_AUTHENTICATION:
6162 		case L2CAP_CR_LE_ENCRYPTION:
6163 			/* If we already have MITM protection we can't do
6164 			 * anything.
6165 			 */
6166 			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6167 				l2cap_chan_del(chan, ECONNREFUSED);
6168 				break;
6169 			}
6170 
6171 			sec_level = hcon->sec_level + 1;
6172 			if (chan->sec_level < sec_level)
6173 				chan->sec_level = sec_level;
6174 
6175 			/* We'll need to send a new Connect Request */
6176 			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6177 
6178 			smp_conn_security(hcon, chan->sec_level);
6179 			break;
6180 
6181 		case L2CAP_CR_LE_BAD_PSM:
6182 			l2cap_chan_del(chan, ECONNREFUSED);
6183 			break;
6184 
6185 		default:
6186 			/* If dcid was not set it means the channel was refused */
6187 			if (!dcid) {
6188 				l2cap_chan_del(chan, ECONNREFUSED);
6189 				break;
6190 			}
6191 
6192 			chan->ident = 0;
6193 			chan->dcid = dcid;
6194 			chan->omtu = mtu;
6195 			chan->remote_mps = mps;
6196 			chan->tx_credits = credits;
6197 			l2cap_chan_ready(chan);
6198 			break;
6199 		}
6200 
6201 		l2cap_chan_unlock(chan);
6202 	}
6203 
6204 	mutex_unlock(&conn->chan_lock);
6205 
6206 	return err;
6207 }
6208 
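/* Handle an Enhanced Credit Based Reconfigure Request updating the MTU
 * and MPS of the listed channels.  A decreased MTU for any listed
 * channel is reported as invalid.
 */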
6209 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6210 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6211 					 u8 *data)
6212 {
6213 	struct l2cap_ecred_reconf_req *req = (void *) data;
6214 	struct l2cap_ecred_reconf_rsp rsp;
6215 	u16 mtu, mps, result;
6216 	struct l2cap_chan *chan;
6217 	int i, num_scid;
6218 
6219 	if (!enable_ecred)
6220 		return -EINVAL;
6221 
6222 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6223 		result = L2CAP_CR_LE_INVALID_PARAMS;
6224 		goto respond;
6225 	}
6226 
6227 	mtu = __le16_to_cpu(req->mtu);
6228 	mps = __le16_to_cpu(req->mps);
6229 
6230 	BT_DBG("mtu %u mps %u", mtu, mps);
6231 
6232 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6233 		result = L2CAP_RECONF_INVALID_MTU;
6234 		goto respond;
6235 	}
6236 
6237 	if (mps < L2CAP_ECRED_MIN_MPS) {
6238 		result = L2CAP_RECONF_INVALID_MPS;
6239 		goto respond;
6240 	}
6241 
6242 	cmd_len -= sizeof(*req);
6243 	num_scid = cmd_len / sizeof(u16);
6244 	result = L2CAP_RECONF_SUCCESS;
6245 
6246 	for (i = 0; i < num_scid; i++) {
6247 		u16 scid;
6248 
6249 		scid = __le16_to_cpu(req->scid[i]);
6250 		if (!scid)
6251 			return -EPROTO;
6252 
6253 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6254 		if (!chan)
6255 			continue;
6256 
6257 		/* If the MTU value is decreased for any of the included
6258 		 * channels, then the receiver shall disconnect all
6259 		 * included channels.
6260 		 */
6261 		if (chan->omtu > mtu) {
6262 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6263 			       chan->omtu, mtu);
6264 			result = L2CAP_RECONF_INVALID_MTU;
6265 		}
6266 
6267 		chan->omtu = mtu;
6268 		chan->remote_mps = mps;
6269 	}
6270 
6271 respond:
6272 	rsp.result = cpu_to_le16(result);
6273 
6274 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6275 		       &rsp);
6276 
6277 	return 0;
6278 }
6279 
6280 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6281 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6282 					 u8 *data)
6283 {
6284 	struct l2cap_chan *chan, *tmp;
6285 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6286 	u16 result;
6287 
6288 	if (cmd_len < sizeof(*rsp))
6289 		return -EPROTO;
6290 
6291 	result = __le16_to_cpu(rsp->result);
6292 
6293 	BT_DBG("result 0x%4.4x", result);
6294 
6295 	if (!result)
6296 		return 0;
6297 
6298 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6299 		if (chan->ident != cmd->ident)
6300 			continue;
6301 
6302 		l2cap_chan_del(chan, ECONNRESET);
6303 	}
6304 
6305 	return 0;
6306 }
6307 
6308 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6309 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6310 				       u8 *data)
6311 {
6312 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6313 	struct l2cap_chan *chan;
6314 
6315 	if (cmd_len < sizeof(*rej))
6316 		return -EPROTO;
6317 
6318 	mutex_lock(&conn->chan_lock);
6319 
6320 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6321 	if (!chan)
6322 		goto done;
6323 
6324 	l2cap_chan_lock(chan);
6325 	l2cap_chan_del(chan, ECONNREFUSED);
6326 	l2cap_chan_unlock(chan);
6327 
6328 done:
6329 	mutex_unlock(&conn->chan_lock);
6330 	return 0;
6331 }
6332 
6333 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6334 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6335 				   u8 *data)
6336 {
6337 	int err = 0;
6338 
6339 	switch (cmd->code) {
6340 	case L2CAP_COMMAND_REJ:
6341 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
6342 		break;
6343 
6344 	case L2CAP_CONN_PARAM_UPDATE_REQ:
6345 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6346 		break;
6347 
6348 	case L2CAP_CONN_PARAM_UPDATE_RSP:
6349 		break;
6350 
6351 	case L2CAP_LE_CONN_RSP:
6352 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6353 		break;
6354 
6355 	case L2CAP_LE_CONN_REQ:
6356 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6357 		break;
6358 
6359 	case L2CAP_LE_CREDITS:
6360 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
6361 		break;
6362 
6363 	case L2CAP_ECRED_CONN_REQ:
6364 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6365 		break;
6366 
6367 	case L2CAP_ECRED_CONN_RSP:
6368 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6369 		break;
6370 
6371 	case L2CAP_ECRED_RECONF_REQ:
6372 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6373 		break;
6374 
6375 	case L2CAP_ECRED_RECONF_RSP:
6376 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6377 		break;
6378 
6379 	case L2CAP_DISCONN_REQ:
6380 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6381 		break;
6382 
6383 	case L2CAP_DISCONN_RSP:
6384 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6385 		break;
6386 
6387 	default:
6388 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
6389 		err = -EINVAL;
6390 		break;
6391 	}
6392 
6393 	return err;
6394 }
6395 
6396 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6397 					struct sk_buff *skb)
6398 {
6399 	struct hci_conn *hcon = conn->hcon;
6400 	struct l2cap_cmd_hdr *cmd;
6401 	u16 len;
6402 	int err;
6403 
6404 	if (hcon->type != LE_LINK)
6405 		goto drop;
6406 
6407 	if (skb->len < L2CAP_CMD_HDR_SIZE)
6408 		goto drop;
6409 
6410 	cmd = (void *) skb->data;
6411 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6412 
6413 	len = le16_to_cpu(cmd->len);
6414 
6415 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
6416 
6417 	if (len != skb->len || !cmd->ident) {
6418 		BT_DBG("corrupted command");
6419 		goto drop;
6420 	}
6421 
6422 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6423 	if (err) {
6424 		struct l2cap_cmd_rej_unk rej;
6425 
6426 		BT_ERR("Wrong link type (%d)", err);
6427 
6428 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6429 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6430 			       sizeof(rej), &rej);
6431 	}
6432 
6433 drop:
6434 	kfree_skb(skb);
6435 }
6436 
6437 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6438 				     struct sk_buff *skb)
6439 {
6440 	struct hci_conn *hcon = conn->hcon;
6441 	struct l2cap_cmd_hdr *cmd;
6442 	int err;
6443 
6444 	l2cap_raw_recv(conn, skb);
6445 
6446 	if (hcon->type != ACL_LINK)
6447 		goto drop;
6448 
6449 	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6450 		u16 len;
6451 
6452 		cmd = (void *) skb->data;
6453 		skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6454 
6455 		len = le16_to_cpu(cmd->len);
6456 
6457 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
6458 		       cmd->ident);
6459 
6460 		if (len > skb->len || !cmd->ident) {
6461 			BT_DBG("corrupted command");
6462 			break;
6463 		}
6464 
6465 		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6466 		if (err) {
6467 			struct l2cap_cmd_rej_unk rej;
6468 
6469 			BT_ERR("Wrong link type (%d)", err);
6470 
6471 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6472 			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6473 				       sizeof(rej), &rej);
6474 		}
6475 
6476 		skb_pull(skb, len);
6477 	}
6478 
6479 drop:
6480 	kfree_skb(skb);
6481 }
6482 
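/* Verify and strip the CRC16 FCS of a received ERTM/streaming frame, if
 * FCS is enabled on this channel.
 */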
6483 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
6484 {
6485 	u16 our_fcs, rcv_fcs;
6486 	int hdr_size;
6487 
6488 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6489 		hdr_size = L2CAP_EXT_HDR_SIZE;
6490 	else
6491 		hdr_size = L2CAP_ENH_HDR_SIZE;
6492 
6493 	if (chan->fcs == L2CAP_FCS_CRC16) {
6494 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6495 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6496 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6497 
6498 		if (our_fcs != rcv_fcs)
6499 			return -EBADMSG;
6500 	}
6501 	return 0;
6502 }
6503 
6504 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6505 {
6506 	struct l2cap_ctrl control;
6507 
6508 	BT_DBG("chan %p", chan);
6509 
6510 	memset(&control, 0, sizeof(control));
6511 	control.sframe = 1;
6512 	control.final = 1;
6513 	control.reqseq = chan->buffer_seq;
6514 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6515 
6516 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6517 		control.super = L2CAP_SUPER_RNR;
6518 		l2cap_send_sframe(chan, &control);
6519 	}
6520 
6521 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6522 	    chan->unacked_frames > 0)
6523 		__set_retrans_timer(chan);
6524 
6525 	/* Send pending iframes */
6526 	l2cap_ertm_send(chan);
6527 
6528 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6529 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6530 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
6531 		 * send it now.
6532 		 */
6533 		control.super = L2CAP_SUPER_RR;
6534 		l2cap_send_sframe(chan, &control);
6535 	}
6536 }
6537 
6538 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6539 			    struct sk_buff **last_frag)
6540 {
6541 	/* skb->len reflects data in skb as well as all fragments
6542 	 * skb->data_len reflects only data in fragments
6543 	 */
6544 	if (!skb_has_frag_list(skb))
6545 		skb_shinfo(skb)->frag_list = new_frag;
6546 
6547 	new_frag->next = NULL;
6548 
6549 	(*last_frag)->next = new_frag;
6550 	*last_frag = new_frag;
6551 
6552 	skb->len += new_frag->len;
6553 	skb->data_len += new_frag->len;
6554 	skb->truesize += new_frag->truesize;
6555 }
6556 
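/* Reassemble an SDU from SAR-tagged I-frame payloads and pass complete
 * SDUs to the channel's recv callback.  On error the skb and any
 * partially assembled SDU are freed.
 */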
6557 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6558 				struct l2cap_ctrl *control)
6559 {
6560 	int err = -EINVAL;
6561 
6562 	switch (control->sar) {
6563 	case L2CAP_SAR_UNSEGMENTED:
6564 		if (chan->sdu)
6565 			break;
6566 
6567 		err = chan->ops->recv(chan, skb);
6568 		break;
6569 
6570 	case L2CAP_SAR_START:
6571 		if (chan->sdu)
6572 			break;
6573 
6574 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6575 			break;
6576 
6577 		chan->sdu_len = get_unaligned_le16(skb->data);
6578 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6579 
6580 		if (chan->sdu_len > chan->imtu) {
6581 			err = -EMSGSIZE;
6582 			break;
6583 		}
6584 
6585 		if (skb->len >= chan->sdu_len)
6586 			break;
6587 
6588 		chan->sdu = skb;
6589 		chan->sdu_last_frag = skb;
6590 
6591 		skb = NULL;
6592 		err = 0;
6593 		break;
6594 
6595 	case L2CAP_SAR_CONTINUE:
6596 		if (!chan->sdu)
6597 			break;
6598 
6599 		append_skb_frag(chan->sdu, skb,
6600 				&chan->sdu_last_frag);
6601 		skb = NULL;
6602 
6603 		if (chan->sdu->len >= chan->sdu_len)
6604 			break;
6605 
6606 		err = 0;
6607 		break;
6608 
6609 	case L2CAP_SAR_END:
6610 		if (!chan->sdu)
6611 			break;
6612 
6613 		append_skb_frag(chan->sdu, skb,
6614 				&chan->sdu_last_frag);
6615 		skb = NULL;
6616 
6617 		if (chan->sdu->len != chan->sdu_len)
6618 			break;
6619 
6620 		err = chan->ops->recv(chan, chan->sdu);
6621 
6622 		if (!err) {
6623 			/* Reassembly complete */
6624 			chan->sdu = NULL;
6625 			chan->sdu_last_frag = NULL;
6626 			chan->sdu_len = 0;
6627 		}
6628 		break;
6629 	}
6630 
6631 	if (err) {
6632 		kfree_skb(skb);
6633 		kfree_skb(chan->sdu);
6634 		chan->sdu = NULL;
6635 		chan->sdu_last_frag = NULL;
6636 		chan->sdu_len = 0;
6637 	}
6638 
6639 	return err;
6640 }
6641 
6642 static int l2cap_resegment(struct l2cap_chan *chan)
6643 {
6644 	/* Placeholder */
6645 	return 0;
6646 }
6647 
6648 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6649 {
6650 	u8 event;
6651 
6652 	if (chan->mode != L2CAP_MODE_ERTM)
6653 		return;
6654 
6655 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6656 	l2cap_tx(chan, NULL, NULL, event);
6657 }
6658 
6659 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6660 {
6661 	int err = 0;
6662 	/* Pass sequential frames to l2cap_reassemble_sdu()
6663 	 * until a gap is encountered.
6664 	 */
6665 
6666 	BT_DBG("chan %p", chan);
6667 
6668 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6669 		struct sk_buff *skb;
6670 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
6671 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
6672 
6673 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6674 
6675 		if (!skb)
6676 			break;
6677 
6678 		skb_unlink(skb, &chan->srej_q);
6679 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6680 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6681 		if (err)
6682 			break;
6683 	}
6684 
6685 	if (skb_queue_empty(&chan->srej_q)) {
6686 		chan->rx_state = L2CAP_RX_STATE_RECV;
6687 		l2cap_send_ack(chan);
6688 	}
6689 
6690 	return err;
6691 }
6692 
6693 static void l2cap_handle_srej(struct l2cap_chan *chan,
6694 			      struct l2cap_ctrl *control)
6695 {
6696 	struct sk_buff *skb;
6697 
6698 	BT_DBG("chan %p, control %p", chan, control);
6699 
6700 	if (control->reqseq == chan->next_tx_seq) {
6701 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6702 		l2cap_send_disconn_req(chan, ECONNRESET);
6703 		return;
6704 	}
6705 
6706 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6707 
6708 	if (skb == NULL) {
6709 		BT_DBG("Seq %d not available for retransmission",
6710 		       control->reqseq);
6711 		return;
6712 	}
6713 
6714 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6715 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6716 		l2cap_send_disconn_req(chan, ECONNRESET);
6717 		return;
6718 	}
6719 
6720 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6721 
6722 	if (control->poll) {
6723 		l2cap_pass_to_tx(chan, control);
6724 
6725 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
6726 		l2cap_retransmit(chan, control);
6727 		l2cap_ertm_send(chan);
6728 
6729 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6730 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6731 			chan->srej_save_reqseq = control->reqseq;
6732 		}
6733 	} else {
6734 		l2cap_pass_to_tx_fbit(chan, control);
6735 
6736 		if (control->final) {
6737 			if (chan->srej_save_reqseq != control->reqseq ||
6738 			    !test_and_clear_bit(CONN_SREJ_ACT,
6739 						&chan->conn_state))
6740 				l2cap_retransmit(chan, control);
6741 		} else {
6742 			l2cap_retransmit(chan, control);
6743 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6744 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6745 				chan->srej_save_reqseq = control->reqseq;
6746 			}
6747 		}
6748 	}
6749 }
6750 
6751 static void l2cap_handle_rej(struct l2cap_chan *chan,
6752 			     struct l2cap_ctrl *control)
6753 {
6754 	struct sk_buff *skb;
6755 
6756 	BT_DBG("chan %p, control %p", chan, control);
6757 
6758 	if (control->reqseq == chan->next_tx_seq) {
6759 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6760 		l2cap_send_disconn_req(chan, ECONNRESET);
6761 		return;
6762 	}
6763 
6764 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6765 
6766 	if (chan->max_tx && skb &&
6767 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6768 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6769 		l2cap_send_disconn_req(chan, ECONNRESET);
6770 		return;
6771 	}
6772 
6773 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6774 
6775 	l2cap_pass_to_tx(chan, control);
6776 
6777 	if (control->final) {
6778 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6779 			l2cap_retransmit_all(chan, control);
6780 	} else {
6781 		l2cap_retransmit_all(chan, control);
6782 		l2cap_ertm_send(chan);
6783 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6784 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6785 	}
6786 }
6787 
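/* Classify a received TxSeq against the expected sequence number and the
 * tx window: expected, duplicate, unexpected (indicating missing frames)
 * or invalid, with SREJ-specific variants while in the SREJ_SENT state.
 */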
6788 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6789 {
6790 	BT_DBG("chan %p, txseq %d", chan, txseq);
6791 
6792 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6793 	       chan->expected_tx_seq);
6794 
6795 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6796 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6797 		    chan->tx_win) {
6798 			/* See notes below regarding "double poll" and
6799 			 * invalid packets.
6800 			 */
6801 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6802 				BT_DBG("Invalid/Ignore - after SREJ");
6803 				return L2CAP_TXSEQ_INVALID_IGNORE;
6804 			} else {
6805 				BT_DBG("Invalid - in window after SREJ sent");
6806 				return L2CAP_TXSEQ_INVALID;
6807 			}
6808 		}
6809 
6810 		if (chan->srej_list.head == txseq) {
6811 			BT_DBG("Expected SREJ");
6812 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6813 		}
6814 
6815 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6816 			BT_DBG("Duplicate SREJ - txseq already stored");
6817 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6818 		}
6819 
6820 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6821 			BT_DBG("Unexpected SREJ - not requested");
6822 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6823 		}
6824 	}
6825 
6826 	if (chan->expected_tx_seq == txseq) {
6827 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6828 		    chan->tx_win) {
6829 			BT_DBG("Invalid - txseq outside tx window");
6830 			return L2CAP_TXSEQ_INVALID;
6831 		} else {
6832 			BT_DBG("Expected");
6833 			return L2CAP_TXSEQ_EXPECTED;
6834 		}
6835 	}
6836 
6837 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6838 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6839 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6840 		return L2CAP_TXSEQ_DUPLICATE;
6841 	}
6842 
6843 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6844 		/* A source of invalid packets is a "double poll" condition,
6845 		 * where delays cause us to send multiple poll packets.  If
6846 		 * the remote stack receives and processes both polls,
6847 		 * sequence numbers can wrap around in such a way that a
6848 		 * resent frame has a sequence number that looks like new data
6849 		 * with a sequence gap.  This would trigger an erroneous SREJ
6850 		 * request.
6851 		 *
6852 		 * Fortunately, this is impossible with a tx window that's
6853 		 * less than half of the maximum sequence number, which allows
6854 		 * invalid frames to be safely ignored.
6855 		 *
6856 		 * With tx window sizes greater than half of the tx window
6857 		 * maximum, the frame is invalid and cannot be ignored.  This
6858 		 * causes a disconnect.
6859 		 */
6860 
6861 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6862 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6863 			return L2CAP_TXSEQ_INVALID_IGNORE;
6864 		} else {
6865 			BT_DBG("Invalid - txseq outside tx window");
6866 			return L2CAP_TXSEQ_INVALID;
6867 		}
6868 	} else {
6869 		BT_DBG("Unexpected - txseq indicates missing frames");
6870 		return L2CAP_TXSEQ_UNEXPECTED;
6871 	}
6872 }
6873 
6874 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6875 			       struct l2cap_ctrl *control,
6876 			       struct sk_buff *skb, u8 event)
6877 {
6878 	int err = 0;
6879 	bool skb_in_use = false;
6880 
6881 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6882 	       event);
6883 
6884 	switch (event) {
6885 	case L2CAP_EV_RECV_IFRAME:
6886 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6887 		case L2CAP_TXSEQ_EXPECTED:
6888 			l2cap_pass_to_tx(chan, control);
6889 
6890 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6891 				BT_DBG("Busy, discarding expected seq %d",
6892 				       control->txseq);
6893 				break;
6894 			}
6895 
6896 			chan->expected_tx_seq = __next_seq(chan,
6897 							   control->txseq);
6898 
6899 			chan->buffer_seq = chan->expected_tx_seq;
6900 			skb_in_use = true;
6901 
6902 			err = l2cap_reassemble_sdu(chan, skb, control);
6903 			if (err)
6904 				break;
6905 
6906 			if (control->final) {
6907 				if (!test_and_clear_bit(CONN_REJ_ACT,
6908 							&chan->conn_state)) {
6909 					control->final = 0;
6910 					l2cap_retransmit_all(chan, control);
6911 					l2cap_ertm_send(chan);
6912 				}
6913 			}
6914 
6915 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6916 				l2cap_send_ack(chan);
6917 			break;
6918 		case L2CAP_TXSEQ_UNEXPECTED:
6919 			l2cap_pass_to_tx(chan, control);
6920 
6921 			/* Can't issue SREJ frames in the local busy state.
6922 			 * Drop this frame, it will be seen as missing
6923 			 * when local busy is exited.
6924 			 */
6925 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6926 				BT_DBG("Busy, discarding unexpected seq %d",
6927 				       control->txseq);
6928 				break;
6929 			}
6930 
6931 			/* There was a gap in the sequence, so an SREJ
6932 			 * must be sent for each missing frame.  The
6933 			 * current frame is stored for later use.
6934 			 */
6935 			skb_queue_tail(&chan->srej_q, skb);
6936 			skb_in_use = true;
6937 			BT_DBG("Queued %p (queue len %d)", skb,
6938 			       skb_queue_len(&chan->srej_q));
6939 
6940 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6941 			l2cap_seq_list_clear(&chan->srej_list);
6942 			l2cap_send_srej(chan, control->txseq);
6943 
6944 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6945 			break;
6946 		case L2CAP_TXSEQ_DUPLICATE:
6947 			l2cap_pass_to_tx(chan, control);
6948 			break;
6949 		case L2CAP_TXSEQ_INVALID_IGNORE:
6950 			break;
6951 		case L2CAP_TXSEQ_INVALID:
6952 		default:
6953 			l2cap_send_disconn_req(chan, ECONNRESET);
6954 			break;
6955 		}
6956 		break;
6957 	case L2CAP_EV_RECV_RR:
6958 		l2cap_pass_to_tx(chan, control);
6959 		if (control->final) {
6960 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6961 
6962 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6963 			    !__chan_is_moving(chan)) {
6964 				control->final = 0;
6965 				l2cap_retransmit_all(chan, control);
6966 			}
6967 
6968 			l2cap_ertm_send(chan);
6969 		} else if (control->poll) {
6970 			l2cap_send_i_or_rr_or_rnr(chan);
6971 		} else {
6972 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6973 					       &chan->conn_state) &&
6974 			    chan->unacked_frames)
6975 				__set_retrans_timer(chan);
6976 
6977 			l2cap_ertm_send(chan);
6978 		}
6979 		break;
6980 	case L2CAP_EV_RECV_RNR:
6981 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6982 		l2cap_pass_to_tx(chan, control);
6983 		if (control && control->poll) {
6984 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6985 			l2cap_send_rr_or_rnr(chan, 0);
6986 		}
6987 		__clear_retrans_timer(chan);
6988 		l2cap_seq_list_clear(&chan->retrans_list);
6989 		break;
6990 	case L2CAP_EV_RECV_REJ:
6991 		l2cap_handle_rej(chan, control);
6992 		break;
6993 	case L2CAP_EV_RECV_SREJ:
6994 		l2cap_handle_srej(chan, control);
6995 		break;
6996 	default:
6997 		break;
6998 	}
6999 
7000 	if (skb && !skb_in_use) {
7001 		BT_DBG("Freeing %p", skb);
7002 		kfree_skb(skb);
7003 	}
7004 
7005 	return err;
7006 }
7007 
7008 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
7009 				    struct l2cap_ctrl *control,
7010 				    struct sk_buff *skb, u8 event)
7011 {
7012 	int err = 0;
7013 	u16 txseq = control->txseq;
7014 	bool skb_in_use = false;
7015 
7016 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7017 	       event);
7018 
7019 	switch (event) {
7020 	case L2CAP_EV_RECV_IFRAME:
7021 		switch (l2cap_classify_txseq(chan, txseq)) {
7022 		case L2CAP_TXSEQ_EXPECTED:
7023 			/* Keep frame for reassembly later */
7024 			l2cap_pass_to_tx(chan, control);
7025 			skb_queue_tail(&chan->srej_q, skb);
7026 			skb_in_use = true;
7027 			BT_DBG("Queued %p (queue len %d)", skb,
7028 			       skb_queue_len(&chan->srej_q));
7029 
7030 			chan->expected_tx_seq = __next_seq(chan, txseq);
7031 			break;
7032 		case L2CAP_TXSEQ_EXPECTED_SREJ:
7033 			l2cap_seq_list_pop(&chan->srej_list);
7034 
7035 			l2cap_pass_to_tx(chan, control);
7036 			skb_queue_tail(&chan->srej_q, skb);
7037 			skb_in_use = true;
7038 			BT_DBG("Queued %p (queue len %d)", skb,
7039 			       skb_queue_len(&chan->srej_q));
7040 
7041 			err = l2cap_rx_queued_iframes(chan);
7042 			if (err)
7043 				break;
7044 
7045 			break;
7046 		case L2CAP_TXSEQ_UNEXPECTED:
7047 			/* Got a frame that can't be reassembled yet.
7048 			 * Save it for later, and send SREJs to cover
7049 			 * the missing frames.
7050 			 */
7051 			skb_queue_tail(&chan->srej_q, skb);
7052 			skb_in_use = true;
7053 			BT_DBG("Queued %p (queue len %d)", skb,
7054 			       skb_queue_len(&chan->srej_q));
7055 
7056 			l2cap_pass_to_tx(chan, control);
7057 			l2cap_send_srej(chan, control->txseq);
7058 			break;
7059 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7060 			/* This frame was requested with an SREJ, but
7061 			 * some expected retransmitted frames are
7062 			 * missing.  Request retransmission of missing
7063 			 * SREJ'd frames.
7064 			 */
7065 			skb_queue_tail(&chan->srej_q, skb);
7066 			skb_in_use = true;
7067 			BT_DBG("Queued %p (queue len %d)", skb,
7068 			       skb_queue_len(&chan->srej_q));
7069 
7070 			l2cap_pass_to_tx(chan, control);
7071 			l2cap_send_srej_list(chan, control->txseq);
7072 			break;
7073 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
7074 			/* We've already queued this frame.  Drop this copy. */
7075 			l2cap_pass_to_tx(chan, control);
7076 			break;
7077 		case L2CAP_TXSEQ_DUPLICATE:
7078 			/* Expecting a later sequence number, so this frame
7079 			 * was already received.  Ignore it completely.
7080 			 */
7081 			break;
7082 		case L2CAP_TXSEQ_INVALID_IGNORE:
7083 			break;
7084 		case L2CAP_TXSEQ_INVALID:
7085 		default:
7086 			l2cap_send_disconn_req(chan, ECONNRESET);
7087 			break;
7088 		}
7089 		break;
7090 	case L2CAP_EV_RECV_RR:
7091 		l2cap_pass_to_tx(chan, control);
7092 		if (control->final) {
7093 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7094 
7095 			if (!test_and_clear_bit(CONN_REJ_ACT,
7096 						&chan->conn_state)) {
7097 				control->final = 0;
7098 				l2cap_retransmit_all(chan, control);
7099 			}
7100 
7101 			l2cap_ertm_send(chan);
7102 		} else if (control->poll) {
7103 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7104 					       &chan->conn_state) &&
7105 			    chan->unacked_frames) {
7106 				__set_retrans_timer(chan);
7107 			}
7108 
7109 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7110 			l2cap_send_srej_tail(chan);
7111 		} else {
7112 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7113 					       &chan->conn_state) &&
7114 			    chan->unacked_frames)
7115 				__set_retrans_timer(chan);
7116 
7117 			l2cap_send_ack(chan);
7118 		}
7119 		break;
7120 	case L2CAP_EV_RECV_RNR:
7121 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7122 		l2cap_pass_to_tx(chan, control);
7123 		if (control->poll) {
7124 			l2cap_send_srej_tail(chan);
7125 		} else {
7126 			struct l2cap_ctrl rr_control;
7127 			memset(&rr_control, 0, sizeof(rr_control));
7128 			rr_control.sframe = 1;
7129 			rr_control.super = L2CAP_SUPER_RR;
7130 			rr_control.reqseq = chan->buffer_seq;
7131 			l2cap_send_sframe(chan, &rr_control);
7132 		}
7133 
7134 		break;
7135 	case L2CAP_EV_RECV_REJ:
7136 		l2cap_handle_rej(chan, control);
7137 		break;
7138 	case L2CAP_EV_RECV_SREJ:
7139 		l2cap_handle_srej(chan, control);
7140 		break;
7141 	}
7142 
7143 	if (skb && !skb_in_use) {
7144 		BT_DBG("Freeing %p", skb);
7145 		kfree_skb(skb);
7146 	}
7147 
7148 	return err;
7149 }
7150 
7151 static int l2cap_finish_move(struct l2cap_chan *chan)
7152 {
7153 	BT_DBG("chan %p", chan);
7154 
7155 	chan->rx_state = L2CAP_RX_STATE_RECV;
7156 
7157 	if (chan->hs_hcon)
7158 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7159 	else
7160 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7161 
7162 	return l2cap_resegment(chan);
7163 }
7164 
7165 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7166 				 struct l2cap_ctrl *control,
7167 				 struct sk_buff *skb, u8 event)
7168 {
7169 	int err;
7170 
7171 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7172 	       event);
7173 
7174 	if (!control->poll)
7175 		return -EPROTO;
7176 
7177 	l2cap_process_reqseq(chan, control->reqseq);
7178 
7179 	if (!skb_queue_empty(&chan->tx_q))
7180 		chan->tx_send_head = skb_peek(&chan->tx_q);
7181 	else
7182 		chan->tx_send_head = NULL;
7183 
7184 	/* Rewind next_tx_seq to the point expected
7185 	 * by the receiver.
7186 	 */
7187 	chan->next_tx_seq = control->reqseq;
7188 	chan->unacked_frames = 0;
7189 
7190 	err = l2cap_finish_move(chan);
7191 	if (err)
7192 		return err;
7193 
7194 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
7195 	l2cap_send_i_or_rr_or_rnr(chan);
7196 
7197 	if (event == L2CAP_EV_RECV_IFRAME)
7198 		return -EPROTO;
7199 
7200 	return l2cap_rx_state_recv(chan, control, NULL, event);
7201 }
7202 
7203 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7204 				 struct l2cap_ctrl *control,
7205 				 struct sk_buff *skb, u8 event)
7206 {
7207 	int err;
7208 
7209 	if (!control->final)
7210 		return -EPROTO;
7211 
7212 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7213 
7214 	chan->rx_state = L2CAP_RX_STATE_RECV;
7215 	l2cap_process_reqseq(chan, control->reqseq);
7216 
7217 	if (!skb_queue_empty(&chan->tx_q))
7218 		chan->tx_send_head = skb_peek(&chan->tx_q);
7219 	else
7220 		chan->tx_send_head = NULL;
7221 
7222 	/* Rewind next_tx_seq to the point expected
7223 	 * by the receiver.
7224 	 */
7225 	chan->next_tx_seq = control->reqseq;
7226 	chan->unacked_frames = 0;
7227 
7228 	if (chan->hs_hcon)
7229 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7230 	else
7231 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7232 
7233 	err = l2cap_resegment(chan);
7234 
7235 	if (!err)
7236 		err = l2cap_rx_state_recv(chan, control, skb, event);
7237 
7238 	return err;
7239 }
7240 
7241 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7242 {
7243 	/* Make sure reqseq is for a packet that has been sent but not acked */
7244 	u16 unacked;
7245 
7246 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7247 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7248 }
7249 
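/* ERTM receive state machine entry point: validate ReqSeq and dispatch
 * the event to the handler for the current rx_state.
 */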
7250 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7251 		    struct sk_buff *skb, u8 event)
7252 {
7253 	int err = 0;
7254 
7255 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7256 	       control, skb, event, chan->rx_state);
7257 
7258 	if (__valid_reqseq(chan, control->reqseq)) {
7259 		switch (chan->rx_state) {
7260 		case L2CAP_RX_STATE_RECV:
7261 			err = l2cap_rx_state_recv(chan, control, skb, event);
7262 			break;
7263 		case L2CAP_RX_STATE_SREJ_SENT:
7264 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7265 						       event);
7266 			break;
7267 		case L2CAP_RX_STATE_WAIT_P:
7268 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7269 			break;
7270 		case L2CAP_RX_STATE_WAIT_F:
7271 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7272 			break;
7273 		default:
7274 			/* shut it down */
7275 			break;
7276 		}
7277 	} else {
7278 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
7279 		       control->reqseq, chan->next_tx_seq,
7280 		       chan->expected_ack_seq);
7281 		l2cap_send_disconn_req(chan, ECONNRESET);
7282 	}
7283 
7284 	return err;
7285 }
7286 
7287 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7288 			   struct sk_buff *skb)
7289 {
7290 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7291 	       chan->rx_state);
7292 
7293 	if (l2cap_classify_txseq(chan, control->txseq) ==
7294 	    L2CAP_TXSEQ_EXPECTED) {
7295 		l2cap_pass_to_tx(chan, control);
7296 
7297 		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
7298 		       __next_seq(chan, chan->buffer_seq));
7299 
7300 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7301 
7302 		l2cap_reassemble_sdu(chan, skb, control);
7303 	} else {
7304 		if (chan->sdu) {
7305 			kfree_skb(chan->sdu);
7306 			chan->sdu = NULL;
7307 		}
7308 		chan->sdu_last_frag = NULL;
7309 		chan->sdu_len = 0;
7310 
7311 		if (skb) {
7312 			BT_DBG("Freeing %p", skb);
7313 			kfree_skb(skb);
7314 		}
7315 	}
7316 
7317 	chan->last_acked_seq = control->txseq;
7318 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
7319 
7320 	return 0;
7321 }
7322 
7323 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7324 {
7325 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7326 	u16 len;
7327 	u8 event;
7328 
7329 	__unpack_control(chan, skb);
7330 
7331 	len = skb->len;
7332 
7333 	/*
7334 	 * We can just drop the corrupted I-frame here.
7335 	 * Receiver will miss it and start proper recovery
7336 	 * procedures and ask for retransmission.
7337 	 */
7338 	if (l2cap_check_fcs(chan, skb))
7339 		goto drop;
7340 
7341 	if (!control->sframe && control->sar == L2CAP_SAR_START)
7342 		len -= L2CAP_SDULEN_SIZE;
7343 
7344 	if (chan->fcs == L2CAP_FCS_CRC16)
7345 		len -= L2CAP_FCS_SIZE;
7346 
7347 	if (len > chan->mps) {
7348 		l2cap_send_disconn_req(chan, ECONNRESET);
7349 		goto drop;
7350 	}
7351 
7352 	if (chan->ops->filter) {
7353 		if (chan->ops->filter(chan, skb))
7354 			goto drop;
7355 	}
7356 
7357 	if (!control->sframe) {
7358 		int err;
7359 
7360 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7361 		       control->sar, control->reqseq, control->final,
7362 		       control->txseq);
7363 
7364 		/* Validate F-bit - F=0 always valid, F=1 only
7365 		 * valid in TX WAIT_F
7366 		 */
7367 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7368 			goto drop;
7369 
7370 		if (chan->mode != L2CAP_MODE_STREAMING) {
7371 			event = L2CAP_EV_RECV_IFRAME;
7372 			err = l2cap_rx(chan, control, skb, event);
7373 		} else {
7374 			err = l2cap_stream_rx(chan, control, skb);
7375 		}
7376 
7377 		if (err)
7378 			l2cap_send_disconn_req(chan, ECONNRESET);
7379 	} else {
7380 		const u8 rx_func_to_event[4] = {
7381 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7382 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7383 		};
7384 
7385 		/* Only I-frames are expected in streaming mode */
7386 		if (chan->mode == L2CAP_MODE_STREAMING)
7387 			goto drop;
7388 
7389 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7390 		       control->reqseq, control->final, control->poll,
7391 		       control->super);
7392 
7393 		if (len != 0) {
7394 			BT_ERR("Trailing bytes: %d in sframe", len);
7395 			l2cap_send_disconn_req(chan, ECONNRESET);
7396 			goto drop;
7397 		}
7398 
7399 		/* Validate F and P bits */
7400 		if (control->final && (control->poll ||
7401 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7402 			goto drop;
7403 
7404 		event = rx_func_to_event[control->super];
7405 		if (l2cap_rx(chan, control, skb, event))
7406 			l2cap_send_disconn_req(chan, ECONNRESET);
7407 	}
7408 
7409 	return 0;
7410 
7411 drop:
7412 	kfree_skb(skb);
7413 	return 0;
7414 }
7415 
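/* Return receive credits to the remote device on an LE or enhanced
 * credit based channel.  The target pool is (imtu / mps) + 1 credits,
 * enough for the peer to send one maximum sized SDU; an L2CAP_LE_CREDITS
 * packet is sent only when rx_credits has dropped below that target.
 */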
7416 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7417 {
7418 	struct l2cap_conn *conn = chan->conn;
7419 	struct l2cap_le_credits pkt;
7420 	u16 return_credits;
7421 
7422 	return_credits = (chan->imtu / chan->mps) + 1;
7423 
7424 	if (chan->rx_credits >= return_credits)
7425 		return;
7426 
7427 	return_credits -= chan->rx_credits;
7428 
7429 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7430 
7431 	chan->rx_credits += return_credits;
7432 
7433 	pkt.cid     = cpu_to_le16(chan->scid);
7434 	pkt.credits = cpu_to_le16(return_credits);
7435 
7436 	chan->ident = l2cap_get_ident(conn);
7437 
7438 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7439 }
7440 
7441 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7442 {
7443 	int err;
7444 
7445 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7446 
7447 	/* Wait for recv to confirm reception before updating the credits */
7448 	err = chan->ops->recv(chan, skb);
7449 
7450 	/* Update credits whenever an SDU is received */
7451 	l2cap_chan_le_send_credits(chan);
7452 
7453 	return err;
7454 }
7455 
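/* Receive path for LE and enhanced credit based flow control modes.
 * Every incoming PDU consumes one credit.  The first PDU of an SDU
 * carries a 2-byte SDU length; follow-up PDUs are appended to chan->sdu
 * until sdu_len bytes have been collected and the reassembled SDU is
 * handed to l2cap_ecred_recv().
 */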
7456 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7457 {
7458 	int err;
7459 
7460 	if (!chan->rx_credits) {
7461 		BT_ERR("No credits to receive LE L2CAP data");
7462 		l2cap_send_disconn_req(chan, ECONNRESET);
7463 		return -ENOBUFS;
7464 	}
7465 
7466 	if (chan->imtu < skb->len) {
7467 		BT_ERR("Too big LE L2CAP PDU");
7468 		return -ENOBUFS;
7469 	}
7470 
7471 	chan->rx_credits--;
7472 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7473 
7474 	/* Update if the remote has run out of credits; this should only
7475 	 * happen if the remote is not using the entire MPS.
7476 	 */
7477 	if (!chan->rx_credits)
7478 		l2cap_chan_le_send_credits(chan);
7479 
7480 	err = 0;
7481 
7482 	if (!chan->sdu) {
7483 		u16 sdu_len;
7484 
7485 		sdu_len = get_unaligned_le16(skb->data);
7486 		skb_pull(skb, L2CAP_SDULEN_SIZE);
7487 
7488 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7489 		       sdu_len, skb->len, chan->imtu);
7490 
7491 		if (sdu_len > chan->imtu) {
7492 			BT_ERR("Too big LE L2CAP SDU length received");
7493 			err = -EMSGSIZE;
7494 			goto failed;
7495 		}
7496 
7497 		if (skb->len > sdu_len) {
7498 			BT_ERR("Too much LE L2CAP data received");
7499 			err = -EINVAL;
7500 			goto failed;
7501 		}
7502 
7503 		if (skb->len == sdu_len)
7504 			return l2cap_ecred_recv(chan, skb);
7505 
7506 		chan->sdu = skb;
7507 		chan->sdu_len = sdu_len;
7508 		chan->sdu_last_frag = skb;
7509 
7510 		/* Detect if remote is not able to use the selected MPS */
7511 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7512 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7513 
7514 			/* Adjust the number of credits */
7515 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7516 			chan->mps = mps_len;
7517 			l2cap_chan_le_send_credits(chan);
7518 		}
7519 
7520 		return 0;
7521 	}
7522 
7523 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7524 	       chan->sdu->len, skb->len, chan->sdu_len);
7525 
7526 	if (chan->sdu->len + skb->len > chan->sdu_len) {
7527 		BT_ERR("Too much LE L2CAP data received");
7528 		err = -EINVAL;
7529 		goto failed;
7530 	}
7531 
7532 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7533 	skb = NULL;
7534 
7535 	if (chan->sdu->len == chan->sdu_len) {
7536 		err = l2cap_ecred_recv(chan, chan->sdu);
7537 		if (!err) {
7538 			chan->sdu = NULL;
7539 			chan->sdu_last_frag = NULL;
7540 			chan->sdu_len = 0;
7541 		}
7542 	}
7543 
7544 failed:
7545 	if (err) {
7546 		kfree_skb(skb);
7547 		kfree_skb(chan->sdu);
7548 		chan->sdu = NULL;
7549 		chan->sdu_last_frag = NULL;
7550 		chan->sdu_len = 0;
7551 	}
7552 
7553 	/* We can't return an error here since we took care of the skb
7554 	 * freeing internally. An error return would cause the caller to
7555 	 * do a double-free of the skb.
7556 	 */
7557 	return 0;
7558 }
7559 
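/* Handle a data frame for a channel looked up by source CID and dispatch
 * it according to the channel mode: credit based modes, basic mode (with
 * a plain imtu check) or ERTM/streaming.  Frames for unknown CIDs or for
 * channels that are not in BT_CONNECTED state are dropped.
 */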
7560 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7561 			       struct sk_buff *skb)
7562 {
7563 	struct l2cap_chan *chan;
7564 
7565 	chan = l2cap_get_chan_by_scid(conn, cid);
7566 	if (!chan) {
7567 		if (cid == L2CAP_CID_A2MP) {
7568 			chan = a2mp_channel_create(conn, skb);
7569 			if (!chan) {
7570 				kfree_skb(skb);
7571 				return;
7572 			}
7573 
7574 			l2cap_chan_lock(chan);
7575 		} else {
7576 			BT_DBG("unknown cid 0x%4.4x", cid);
7577 			/* Drop packet and return */
7578 			kfree_skb(skb);
7579 			return;
7580 		}
7581 	}
7582 
7583 	BT_DBG("chan %p, len %d", chan, skb->len);
7584 
7585 	/* If we receive data on a fixed channel before the info req/rsp
7586 	 * procedure is done, simply assume that the channel is supported
7587 	 * and mark it as ready.
7588 	 */
7589 	if (chan->chan_type == L2CAP_CHAN_FIXED)
7590 		l2cap_chan_ready(chan);
7591 
7592 	if (chan->state != BT_CONNECTED)
7593 		goto drop;
7594 
7595 	switch (chan->mode) {
7596 	case L2CAP_MODE_LE_FLOWCTL:
7597 	case L2CAP_MODE_EXT_FLOWCTL:
7598 		if (l2cap_ecred_data_rcv(chan, skb) < 0)
7599 			goto drop;
7600 
7601 		goto done;
7602 
7603 	case L2CAP_MODE_BASIC:
7604 		/* If the socket recv buffer overflows we drop data here,
7605 		 * which is *bad* because L2CAP has to be reliable.
7606 		 * But we don't have any other choice; basic mode L2CAP
7607 		 * doesn't provide a flow control mechanism. */
7608 
7609 		if (chan->imtu < skb->len) {
7610 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
7611 			goto drop;
7612 		}
7613 
7614 		if (!chan->ops->recv(chan, skb))
7615 			goto done;
7616 		break;
7617 
7618 	case L2CAP_MODE_ERTM:
7619 	case L2CAP_MODE_STREAMING:
7620 		l2cap_data_rcv(chan, skb);
7621 		goto done;
7622 
7623 	default:
7624 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7625 		break;
7626 	}
7627 
7628 drop:
7629 	kfree_skb(skb);
7630 
7631 done:
7632 	l2cap_chan_unlock(chan);
7633 	l2cap_chan_put(chan);
7634 }
7635 
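/* Handle a connectionless (G-frame) payload.  The PSM prefix has already
 * been pulled off by l2cap_recv_frame(); the data is delivered to a
 * matching channel in BT_BOUND or BT_CONNECTED state on an ACL link, with
 * the remote address and PSM stored in the skb control block for
 * msg_name.
 */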
7636 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7637 				  struct sk_buff *skb)
7638 {
7639 	struct hci_conn *hcon = conn->hcon;
7640 	struct l2cap_chan *chan;
7641 
7642 	if (hcon->type != ACL_LINK)
7643 		goto free_skb;
7644 
7645 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7646 					ACL_LINK);
7647 	if (!chan)
7648 		goto free_skb;
7649 
7650 	BT_DBG("chan %p, len %d", chan, skb->len);
7651 
7652 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7653 		goto drop;
7654 
7655 	if (chan->imtu < skb->len)
7656 		goto drop;
7657 
7658 	/* Store remote BD_ADDR and PSM for msg_name */
7659 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7660 	bt_cb(skb)->l2cap.psm = psm;
7661 
7662 	if (!chan->ops->recv(chan, skb)) {
7663 		l2cap_chan_put(chan);
7664 		return;
7665 	}
7666 
7667 drop:
7668 	l2cap_chan_put(chan);
7669 free_skb:
7670 	kfree_skb(skb);
7671 }
7672 
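/* Demultiplex a complete, reassembled L2CAP frame.  Frames that arrive
 * before the HCI link reaches BT_CONNECTED are queued on pending_rx and
 * replayed later by process_pending_rx().  Otherwise the basic header is
 * stripped and the payload is routed by CID: signalling, connectionless,
 * LE signalling, or a data channel.
 */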
7673 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7674 {
7675 	struct l2cap_hdr *lh = (void *) skb->data;
7676 	struct hci_conn *hcon = conn->hcon;
7677 	u16 cid, len;
7678 	__le16 psm;
7679 
7680 	if (hcon->state != BT_CONNECTED) {
7681 		BT_DBG("queueing pending rx skb");
7682 		skb_queue_tail(&conn->pending_rx, skb);
7683 		return;
7684 	}
7685 
7686 	skb_pull(skb, L2CAP_HDR_SIZE);
7687 	cid = __le16_to_cpu(lh->cid);
7688 	len = __le16_to_cpu(lh->len);
7689 
7690 	if (len != skb->len) {
7691 		kfree_skb(skb);
7692 		return;
7693 	}
7694 
7695 	/* Since we can't actively block incoming LE connections we must
7696 	 * at least ensure that we ignore incoming data from them.
7697 	 */
7698 	if (hcon->type == LE_LINK &&
7699 	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
7700 				   bdaddr_dst_type(hcon))) {
7701 		kfree_skb(skb);
7702 		return;
7703 	}
7704 
7705 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7706 
7707 	switch (cid) {
7708 	case L2CAP_CID_SIGNALING:
7709 		l2cap_sig_channel(conn, skb);
7710 		break;
7711 
7712 	case L2CAP_CID_CONN_LESS:
7713 		psm = get_unaligned((__le16 *) skb->data);
7714 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7715 		l2cap_conless_channel(conn, psm, skb);
7716 		break;
7717 
7718 	case L2CAP_CID_LE_SIGNALING:
7719 		l2cap_le_sig_channel(conn, skb);
7720 		break;
7721 
7722 	default:
7723 		l2cap_data_channel(conn, cid, skb);
7724 		break;
7725 	}
7726 }
7727 
7728 static void process_pending_rx(struct work_struct *work)
7729 {
7730 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7731 					       pending_rx_work);
7732 	struct sk_buff *skb;
7733 
7734 	BT_DBG("");
7735 
7736 	while ((skb = skb_dequeue(&conn->pending_rx)))
7737 		l2cap_recv_frame(conn, skb);
7738 }
7739 
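/* Create the struct l2cap_conn for an HCI connection, or return the
 * existing one.  This allocates the HCI channel used for transmission,
 * picks the MTU from the controller (le_mtu for LE links when set,
 * acl_mtu otherwise), advertises the locally supported fixed channels
 * and initializes the locks, lists and work items of the connection.
 */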
7740 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7741 {
7742 	struct l2cap_conn *conn = hcon->l2cap_data;
7743 	struct hci_chan *hchan;
7744 
7745 	if (conn)
7746 		return conn;
7747 
7748 	hchan = hci_chan_create(hcon);
7749 	if (!hchan)
7750 		return NULL;
7751 
7752 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7753 	if (!conn) {
7754 		hci_chan_del(hchan);
7755 		return NULL;
7756 	}
7757 
7758 	kref_init(&conn->ref);
7759 	hcon->l2cap_data = conn;
7760 	conn->hcon = hci_conn_get(hcon);
7761 	conn->hchan = hchan;
7762 
7763 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7764 
7765 	switch (hcon->type) {
7766 	case LE_LINK:
7767 		if (hcon->hdev->le_mtu) {
7768 			conn->mtu = hcon->hdev->le_mtu;
7769 			break;
7770 		}
7771 		fallthrough;
7772 	default:
7773 		conn->mtu = hcon->hdev->acl_mtu;
7774 		break;
7775 	}
7776 
7777 	conn->feat_mask = 0;
7778 
7779 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7780 
7781 	if (hcon->type == ACL_LINK &&
7782 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7783 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7784 
7785 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7786 	    (bredr_sc_enabled(hcon->hdev) ||
7787 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7788 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7789 
7790 	mutex_init(&conn->ident_lock);
7791 	mutex_init(&conn->chan_lock);
7792 
7793 	INIT_LIST_HEAD(&conn->chan_l);
7794 	INIT_LIST_HEAD(&conn->users);
7795 
7796 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7797 
7798 	skb_queue_head_init(&conn->pending_rx);
7799 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7800 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7801 
7802 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7803 
7804 	return conn;
7805 }
7806 
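/* Valid PSM examples given the rules below: on LE links any PSM in the
 * range 0x0001-0x00ff is accepted, while on BR/EDR the PSM must have an
 * odd least significant octet and an even most significant octet, so
 * 0x0001 and 0x1003 are valid but 0x0002 and 0x0101 are not.
 */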
7807 static bool is_valid_psm(u16 psm, u8 dst_type)
7808 {
7809 	if (!psm)
7810 		return false;
7811 
7812 	if (bdaddr_type_is_le(dst_type))
7813 		return (psm <= 0x00ff);
7814 
7815 	/* PSM must be odd and lsb of upper byte must be 0 */
7816 	return ((psm & 0x0101) == 0x0001);
7817 }
7818 
7819 struct l2cap_chan_data {
7820 	struct l2cap_chan *chan;
7821 	struct pid *pid;
7822 	int count;
7823 };
7824 
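/* Iterator callback used by l2cap_chan_connect() to count other deferred
 * L2CAP_MODE_EXT_FLOWCTL channels opened by the same process for the
 * same PSM that are still waiting to be connected, so that no more than
 * L2CAP_ECRED_CONN_SCID_MAX channels end up in one connect request.
 */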
7825 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7826 {
7827 	struct l2cap_chan_data *d = data;
7828 	struct pid *pid;
7829 
7830 	if (chan == d->chan)
7831 		return;
7832 
7833 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7834 		return;
7835 
7836 	pid = chan->ops->get_peer_pid(chan);
7837 
7838 	/* Only count deferred channels with the same PID/PSM */
7839 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7840 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7841 		return;
7842 
7843 	d->count++;
7844 }
7845 
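/* Initiate an outgoing connection for a channel: validate the PSM/CID
 * and channel mode, create or reuse the underlying ACL or LE link, attach
 * the channel to the l2cap_conn, and either start the L2CAP connect
 * procedure right away (if the link is already up) or wait for the
 * connect_cfm callback.
 */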
7846 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7847 		       bdaddr_t *dst, u8 dst_type)
7848 {
7849 	struct l2cap_conn *conn;
7850 	struct hci_conn *hcon;
7851 	struct hci_dev *hdev;
7852 	int err;
7853 
7854 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7855 	       dst, dst_type, __le16_to_cpu(psm), chan->mode);
7856 
7857 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7858 	if (!hdev)
7859 		return -EHOSTUNREACH;
7860 
7861 	hci_dev_lock(hdev);
7862 
7863 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7864 	    chan->chan_type != L2CAP_CHAN_RAW) {
7865 		err = -EINVAL;
7866 		goto done;
7867 	}
7868 
7869 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7870 		err = -EINVAL;
7871 		goto done;
7872 	}
7873 
7874 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7875 		err = -EINVAL;
7876 		goto done;
7877 	}
7878 
7879 	switch (chan->mode) {
7880 	case L2CAP_MODE_BASIC:
7881 		break;
7882 	case L2CAP_MODE_LE_FLOWCTL:
7883 		break;
7884 	case L2CAP_MODE_EXT_FLOWCTL:
7885 		if (!enable_ecred) {
7886 			err = -EOPNOTSUPP;
7887 			goto done;
7888 		}
7889 		break;
7890 	case L2CAP_MODE_ERTM:
7891 	case L2CAP_MODE_STREAMING:
7892 		if (!disable_ertm)
7893 			break;
7894 		fallthrough;
7895 	default:
7896 		err = -EOPNOTSUPP;
7897 		goto done;
7898 	}
7899 
7900 	switch (chan->state) {
7901 	case BT_CONNECT:
7902 	case BT_CONNECT2:
7903 	case BT_CONFIG:
7904 		/* Already connecting */
7905 		err = 0;
7906 		goto done;
7907 
7908 	case BT_CONNECTED:
7909 		/* Already connected */
7910 		err = -EISCONN;
7911 		goto done;
7912 
7913 	case BT_OPEN:
7914 	case BT_BOUND:
7915 		/* Can connect */
7916 		break;
7917 
7918 	default:
7919 		err = -EBADFD;
7920 		goto done;
7921 	}
7922 
7923 	/* Set destination address and psm */
7924 	bacpy(&chan->dst, dst);
7925 	chan->dst_type = dst_type;
7926 
7927 	chan->psm = psm;
7928 	chan->dcid = cid;
7929 
7930 	if (bdaddr_type_is_le(dst_type)) {
7931 		/* Convert from L2CAP channel address type to HCI address type
7932 		 */
7933 		if (dst_type == BDADDR_LE_PUBLIC)
7934 			dst_type = ADDR_LE_DEV_PUBLIC;
7935 		else
7936 			dst_type = ADDR_LE_DEV_RANDOM;
7937 
7938 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7939 			hcon = hci_connect_le(hdev, dst, dst_type, false,
7940 					      chan->sec_level,
7941 					      HCI_LE_CONN_TIMEOUT,
7942 					      HCI_ROLE_SLAVE);
7943 		else
7944 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
7945 						   chan->sec_level,
7946 						   HCI_LE_CONN_TIMEOUT,
7947 						   CONN_REASON_L2CAP_CHAN);
7948 
7949 	} else {
7950 		u8 auth_type = l2cap_get_auth_type(chan);
7951 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
7952 				       CONN_REASON_L2CAP_CHAN);
7953 	}
7954 
7955 	if (IS_ERR(hcon)) {
7956 		err = PTR_ERR(hcon);
7957 		goto done;
7958 	}
7959 
7960 	conn = l2cap_conn_add(hcon);
7961 	if (!conn) {
7962 		hci_conn_drop(hcon);
7963 		err = -ENOMEM;
7964 		goto done;
7965 	}
7966 
7967 	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
7968 		struct l2cap_chan_data data;
7969 
7970 		data.chan = chan;
7971 		data.pid = chan->ops->get_peer_pid(chan);
7972 		data.count = 1;
7973 
7974 		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
7975 
7976 		/* Check that there aren't too many channels being connected */
7977 		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
7978 			hci_conn_drop(hcon);
7979 			err = -EPROTO;
7980 			goto done;
7981 		}
7982 	}
7983 
7984 	mutex_lock(&conn->chan_lock);
7985 	l2cap_chan_lock(chan);
7986 
7987 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7988 		hci_conn_drop(hcon);
7989 		err = -EBUSY;
7990 		goto chan_unlock;
7991 	}
7992 
7993 	/* Update source addr of the socket */
7994 	bacpy(&chan->src, &hcon->src);
7995 	chan->src_type = bdaddr_src_type(hcon);
7996 
7997 	__l2cap_chan_add(conn, chan);
7998 
7999 	/* l2cap_chan_add takes its own ref so we can drop this one */
8000 	hci_conn_drop(hcon);
8001 
8002 	l2cap_state_change(chan, BT_CONNECT);
8003 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
8004 
8005 	/* Release chan->sport so that it can be reused by other
8006 	 * sockets (as it's only used for listening sockets).
8007 	 */
8008 	write_lock(&chan_list_lock);
8009 	chan->sport = 0;
8010 	write_unlock(&chan_list_lock);
8011 
8012 	if (hcon->state == BT_CONNECTED) {
8013 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
8014 			__clear_chan_timer(chan);
8015 			if (l2cap_chan_check_security(chan, true))
8016 				l2cap_state_change(chan, BT_CONNECTED);
8017 		} else
8018 			l2cap_do_start(chan);
8019 	}
8020 
8021 	err = 0;
8022 
8023 chan_unlock:
8024 	l2cap_chan_unlock(chan);
8025 	mutex_unlock(&conn->chan_lock);
8026 done:
8027 	hci_dev_unlock(hdev);
8028 	hci_dev_put(hdev);
8029 	return err;
8030 }
8031 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8032 
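/* Send an L2CAP_ECRED_RECONF_REQ advertising our updated receive MTU and
 * MPS for a single enhanced credit based channel.
 */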
8033 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8034 {
8035 	struct l2cap_conn *conn = chan->conn;
8036 	struct {
8037 		struct l2cap_ecred_reconf_req req;
8038 		__le16 scid;
8039 	} pdu;
8040 
8041 	pdu.req.mtu = cpu_to_le16(chan->imtu);
8042 	pdu.req.mps = cpu_to_le16(chan->mps);
8043 	pdu.scid    = cpu_to_le16(chan->scid);
8044 
8045 	chan->ident = l2cap_get_ident(conn);
8046 
8047 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8048 		       sizeof(pdu), &pdu);
8049 }
8050 
8051 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8052 {
8053 	if (chan->imtu > mtu)
8054 		return -EINVAL;
8055 
8056 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8057 
8058 	chan->imtu = mtu;
8059 
8060 	l2cap_ecred_reconfigure(chan);
8061 
8062 	return 0;
8063 }
8064 
8065 /* ---- L2CAP interface with lower layer (HCI) ---- */
8066 
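/* Incoming ACL connection indication from the HCI core: report whether
 * any listening channel bound to this controller (or to BDADDR_ANY) is
 * willing to accept the connection and whether a role switch should be
 * requested, preferring exact local address matches over wildcards.
 */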
8067 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8068 {
8069 	int exact = 0, lm1 = 0, lm2 = 0;
8070 	struct l2cap_chan *c;
8071 
8072 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8073 
8074 	/* Find listening sockets and check their link_mode */
8075 	read_lock(&chan_list_lock);
8076 	list_for_each_entry(c, &chan_list, global_l) {
8077 		if (c->state != BT_LISTEN)
8078 			continue;
8079 
8080 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8081 			lm1 |= HCI_LM_ACCEPT;
8082 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8083 				lm1 |= HCI_LM_MASTER;
8084 			exact++;
8085 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8086 			lm2 |= HCI_LM_ACCEPT;
8087 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8088 				lm2 |= HCI_LM_MASTER;
8089 		}
8090 	}
8091 	read_unlock(&chan_list_lock);
8092 
8093 	return exact ? lm1 : lm2;
8094 }
8095 
8096 /* Find the next fixed channel in BT_LISTEN state, continue iteration
8097  * from an existing channel in the list or from the beginning of the
8098  * global list (by passing NULL as first parameter).
8099  */
8100 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8101 						  struct hci_conn *hcon)
8102 {
8103 	u8 src_type = bdaddr_src_type(hcon);
8104 
8105 	read_lock(&chan_list_lock);
8106 
8107 	if (c)
8108 		c = list_next_entry(c, global_l);
8109 	else
8110 		c = list_entry(chan_list.next, typeof(*c), global_l);
8111 
8112 	list_for_each_entry_from(c, &chan_list, global_l) {
8113 		if (c->chan_type != L2CAP_CHAN_FIXED)
8114 			continue;
8115 		if (c->state != BT_LISTEN)
8116 			continue;
8117 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8118 			continue;
8119 		if (src_type != c->src_type)
8120 			continue;
8121 
8122 		c = l2cap_chan_hold_unless_zero(c);
8123 		read_unlock(&chan_list_lock);
8124 		return c;
8125 	}
8126 
8127 	read_unlock(&chan_list_lock);
8128 
8129 	return NULL;
8130 }
8131 
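/* HCI connect complete callback.  On failure the l2cap_conn is torn
 * down; on success the connection is created (if needed), listening
 * fixed channels are given the chance to instantiate themselves on it,
 * and finally the connection is marked ready.
 */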
8132 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8133 {
8134 	struct hci_dev *hdev = hcon->hdev;
8135 	struct l2cap_conn *conn;
8136 	struct l2cap_chan *pchan;
8137 	u8 dst_type;
8138 
8139 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8140 		return;
8141 
8142 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
8143 
8144 	if (status) {
8145 		l2cap_conn_del(hcon, bt_to_errno(status));
8146 		return;
8147 	}
8148 
8149 	conn = l2cap_conn_add(hcon);
8150 	if (!conn)
8151 		return;
8152 
8153 	dst_type = bdaddr_dst_type(hcon);
8154 
8155 	/* If device is blocked, do not create channels for it */
8156 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
8157 		return;
8158 
8159 	/* Find fixed channels and notify them of the new connection. We
8160 	 * use multiple individual lookups, continuing each time where
8161 	 * we left off, because the list lock would prevent calling the
8162 	 * potentially sleeping l2cap_chan_lock() function.
8163 	 */
8164 	pchan = l2cap_global_fixed_chan(NULL, hcon);
8165 	while (pchan) {
8166 		struct l2cap_chan *chan, *next;
8167 
8168 		/* Client fixed channels should override server ones */
8169 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8170 			goto next;
8171 
8172 		l2cap_chan_lock(pchan);
8173 		chan = pchan->ops->new_connection(pchan);
8174 		if (chan) {
8175 			bacpy(&chan->src, &hcon->src);
8176 			bacpy(&chan->dst, &hcon->dst);
8177 			chan->src_type = bdaddr_src_type(hcon);
8178 			chan->dst_type = dst_type;
8179 
8180 			__l2cap_chan_add(conn, chan);
8181 		}
8182 
8183 		l2cap_chan_unlock(pchan);
8184 next:
8185 		next = l2cap_global_fixed_chan(pchan, hcon);
8186 		l2cap_chan_put(pchan);
8187 		pchan = next;
8188 	}
8189 
8190 	l2cap_conn_ready(conn);
8191 }
8192 
8193 int l2cap_disconn_ind(struct hci_conn *hcon)
8194 {
8195 	struct l2cap_conn *conn = hcon->l2cap_data;
8196 
8197 	BT_DBG("hcon %p", hcon);
8198 
8199 	if (!conn)
8200 		return HCI_ERROR_REMOTE_USER_TERM;
8201 	return conn->disc_reason;
8202 }
8203 
8204 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8205 {
8206 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8207 		return;
8208 
8209 	BT_DBG("hcon %p reason %d", hcon, reason);
8210 
8211 	l2cap_conn_del(hcon, bt_to_errno(reason));
8212 }
8213 
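/* React to encryption changes on a connection oriented channel: when
 * encryption is switched off, medium security channels get a grace timer
 * while high/FIPS security channels are closed immediately; when it is
 * switched back on, the grace timer is cleared again.
 */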
8214 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8215 {
8216 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8217 		return;
8218 
8219 	if (encrypt == 0x00) {
8220 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8221 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8222 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8223 			   chan->sec_level == BT_SECURITY_FIPS)
8224 			l2cap_chan_close(chan, ECONNREFUSED);
8225 	} else {
8226 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8227 			__clear_chan_timer(chan);
8228 	}
8229 }
8230 
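/* Security (authentication/encryption) change callback from the HCI
 * core.  Walks every channel on the connection and, depending on its
 * state and the result, resumes it, (re)starts the connect procedure, or
 * answers a pending connect request with success, pending authorization
 * or a security block.
 */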
8231 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8232 {
8233 	struct l2cap_conn *conn = hcon->l2cap_data;
8234 	struct l2cap_chan *chan;
8235 
8236 	if (!conn)
8237 		return;
8238 
8239 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8240 
8241 	mutex_lock(&conn->chan_lock);
8242 
8243 	list_for_each_entry(chan, &conn->chan_l, list) {
8244 		l2cap_chan_lock(chan);
8245 
8246 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8247 		       state_to_string(chan->state));
8248 
8249 		if (chan->scid == L2CAP_CID_A2MP) {
8250 			l2cap_chan_unlock(chan);
8251 			continue;
8252 		}
8253 
8254 		if (!status && encrypt)
8255 			chan->sec_level = hcon->sec_level;
8256 
8257 		if (!__l2cap_no_conn_pending(chan)) {
8258 			l2cap_chan_unlock(chan);
8259 			continue;
8260 		}
8261 
8262 		if (!status && (chan->state == BT_CONNECTED ||
8263 				chan->state == BT_CONFIG)) {
8264 			chan->ops->resume(chan);
8265 			l2cap_check_encryption(chan, encrypt);
8266 			l2cap_chan_unlock(chan);
8267 			continue;
8268 		}
8269 
8270 		if (chan->state == BT_CONNECT) {
8271 			if (!status && l2cap_check_enc_key_size(hcon))
8272 				l2cap_start_connection(chan);
8273 			else
8274 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8275 		} else if (chan->state == BT_CONNECT2 &&
8276 			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8277 			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8278 			struct l2cap_conn_rsp rsp;
8279 			__u16 res, stat;
8280 
8281 			if (!status && l2cap_check_enc_key_size(hcon)) {
8282 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8283 					res = L2CAP_CR_PEND;
8284 					stat = L2CAP_CS_AUTHOR_PEND;
8285 					chan->ops->defer(chan);
8286 				} else {
8287 					l2cap_state_change(chan, BT_CONFIG);
8288 					res = L2CAP_CR_SUCCESS;
8289 					stat = L2CAP_CS_NO_INFO;
8290 				}
8291 			} else {
8292 				l2cap_state_change(chan, BT_DISCONN);
8293 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8294 				res = L2CAP_CR_SEC_BLOCK;
8295 				stat = L2CAP_CS_NO_INFO;
8296 			}
8297 
8298 			rsp.scid   = cpu_to_le16(chan->dcid);
8299 			rsp.dcid   = cpu_to_le16(chan->scid);
8300 			rsp.result = cpu_to_le16(res);
8301 			rsp.status = cpu_to_le16(stat);
8302 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
8303 				       sizeof(rsp), &rsp);
8304 
8305 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8306 			    res == L2CAP_CR_SUCCESS) {
8307 				char buf[128];
8308 				set_bit(CONF_REQ_SENT, &chan->conf_state);
8309 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
8310 					       L2CAP_CONF_REQ,
8311 					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
8312 					       buf);
8313 				chan->num_conf_req++;
8314 			}
8315 		}
8316 
8317 		l2cap_chan_unlock(chan);
8318 	}
8319 
8320 	mutex_unlock(&conn->chan_lock);
8321 }
8322 
8323 /* Append fragment into frame respecting the maximum len of rx_skb */
8324 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
8325 			   u16 len)
8326 {
8327 	if (!conn->rx_skb) {
8328 		/* Allocate skb for the complete frame (with header) */
8329 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8330 		if (!conn->rx_skb)
8331 			return -ENOMEM;
8332 		/* Init rx_len */
8333 		conn->rx_len = len;
8334 	}
8335 
8336 	/* Copy as much as the rx_skb can hold */
8337 	len = min_t(u16, len, skb->len);
8338 	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
8339 	skb_pull(skb, len);
8340 	conn->rx_len -= len;
8341 
8342 	return len;
8343 }
8344 
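/* Complete the 2-byte L2CAP length field of a frame whose start fragment
 * was shorter than the basic header.  Once the length is known, the
 * receive buffer is either kept (if it already has room for the whole
 * frame) or reallocated with the exact expected size.
 */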
8345 static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
8346 {
8347 	struct sk_buff *rx_skb;
8348 	int len;
8349 
8350 	/* Append just enough to complete the header */
8351 	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);
8352 
8353 	/* If the header could not be read, just continue */
8354 	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
8355 		return len;
8356 
8357 	rx_skb = conn->rx_skb;
8358 	len = get_unaligned_le16(rx_skb->data);
8359 
8360 	/* Check if rx_skb has enough space to receive all fragments */
8361 	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
8362 		/* Update expected len */
8363 		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
8364 		return L2CAP_LEN_SIZE;
8365 	}
8366 
8367 	/* Reset conn->rx_skb since it will need to be reallocated in order to
8368 	 * fit all fragments.
8369 	 */
8370 	conn->rx_skb = NULL;
8371 
8372 	/* Reallocate rx_skb using the exact expected length */
8373 	len = l2cap_recv_frag(conn, rx_skb,
8374 			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
8375 	kfree_skb(rx_skb);
8376 
8377 	return len;
8378 }
8379 
8380 static void l2cap_recv_reset(struct l2cap_conn *conn)
8381 {
8382 	kfree_skb(conn->rx_skb);
8383 	conn->rx_skb = NULL;
8384 	conn->rx_len = 0;
8385 }
8386 
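/* Entry point for ACL data from the HCI core.  A start fragment either
 * carries a complete frame, which is handed straight to
 * l2cap_recv_frame(), or opens a reassembly buffer sized from the L2CAP
 * length field; continuation fragments are appended until rx_len reaches
 * zero and the assembled frame is processed.
 */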
8387 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8388 {
8389 	struct l2cap_conn *conn = hcon->l2cap_data;
8390 	int len;
8391 
8392 	/* For an AMP controller do not create an l2cap conn */
8393 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8394 		goto drop;
8395 
8396 	if (!conn)
8397 		conn = l2cap_conn_add(hcon);
8398 
8399 	if (!conn)
8400 		goto drop;
8401 
8402 	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
8403 
8404 	switch (flags) {
8405 	case ACL_START:
8406 	case ACL_START_NO_FLUSH:
8407 	case ACL_COMPLETE:
8408 		if (conn->rx_skb) {
8409 			BT_ERR("Unexpected start frame (len %d)", skb->len);
8410 			l2cap_recv_reset(conn);
8411 			l2cap_conn_unreliable(conn, ECOMM);
8412 		}
8413 
8414 		/* The start fragment may not contain the L2CAP length, so just
8415 		 * copy the initial byte when that happens and use conn->mtu as
8416 		 * the expected length.
8417 		 */
8418 		if (skb->len < L2CAP_LEN_SIZE) {
8419 			if (l2cap_recv_frag(conn, skb, conn->mtu) < 0)
8420 				goto drop;
8421 			return;
8422 		}
8423 
8424 		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
8425 
8426 		if (len == skb->len) {
8427 			/* Complete frame received */
8428 			l2cap_recv_frame(conn, skb);
8429 			return;
8430 		}
8431 
8432 		BT_DBG("Start: total len %d, frag len %u", len, skb->len);
8433 
8434 		if (skb->len > len) {
8435 			BT_ERR("Frame is too long (len %u, expected len %d)",
8436 			       skb->len, len);
8437 			l2cap_conn_unreliable(conn, ECOMM);
8438 			goto drop;
8439 		}
8440 
8441 		/* Append fragment into frame (with header) */
8442 		if (l2cap_recv_frag(conn, skb, len) < 0)
8443 			goto drop;
8444 
8445 		break;
8446 
8447 	case ACL_CONT:
8448 		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);
8449 
8450 		if (!conn->rx_skb) {
8451 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8452 			l2cap_conn_unreliable(conn, ECOMM);
8453 			goto drop;
8454 		}
8455 
8456 		/* Complete the L2CAP length if it has not been read */
8457 		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
8458 			if (l2cap_recv_len(conn, skb) < 0) {
8459 				l2cap_conn_unreliable(conn, ECOMM);
8460 				goto drop;
8461 			}
8462 
8463 			/* Header still could not be read, just continue */
8464 			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
8465 				return;
8466 		}
8467 
8468 		if (skb->len > conn->rx_len) {
8469 			BT_ERR("Fragment is too long (len %u, expected %u)",
8470 			       skb->len, conn->rx_len);
8471 			l2cap_recv_reset(conn);
8472 			l2cap_conn_unreliable(conn, ECOMM);
8473 			goto drop;
8474 		}
8475 
8476 		/* Append fragment into frame (with header) */
8477 		l2cap_recv_frag(conn, skb, skb->len);
8478 
8479 		if (!conn->rx_len) {
8480 			/* Complete frame received. l2cap_recv_frame
8481 			 * takes ownership of the skb so set the global
8482 			 * rx_skb pointer to NULL first.
8483 			 */
8484 			struct sk_buff *rx_skb = conn->rx_skb;
8485 			conn->rx_skb = NULL;
8486 			l2cap_recv_frame(conn, rx_skb);
8487 		}
8488 		break;
8489 	}
8490 
8491 drop:
8492 	kfree_skb(skb);
8493 }
8494 
8495 static struct hci_cb l2cap_cb = {
8496 	.name		= "L2CAP",
8497 	.connect_cfm	= l2cap_connect_cfm,
8498 	.disconn_cfm	= l2cap_disconn_cfm,
8499 	.security_cfm	= l2cap_security_cfm,
8500 };
8501 
8502 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8503 {
8504 	struct l2cap_chan *c;
8505 
8506 	read_lock(&chan_list_lock);
8507 
8508 	list_for_each_entry(c, &chan_list, global_l) {
8509 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8510 			   &c->src, c->src_type, &c->dst, c->dst_type,
8511 			   c->state, __le16_to_cpu(c->psm),
8512 			   c->scid, c->dcid, c->imtu, c->omtu,
8513 			   c->sec_level, c->mode);
8514 	}
8515 
8516 	read_unlock(&chan_list_lock);
8517 
8518 	return 0;
8519 }
8520 
8521 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
8522 
8523 static struct dentry *l2cap_debugfs;
8524 
8525 int __init l2cap_init(void)
8526 {
8527 	int err;
8528 
8529 	err = l2cap_init_sockets();
8530 	if (err < 0)
8531 		return err;
8532 
8533 	hci_register_cb(&l2cap_cb);
8534 
8535 	if (IS_ERR_OR_NULL(bt_debugfs))
8536 		return 0;
8537 
8538 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8539 					    NULL, &l2cap_debugfs_fops);
8540 
8541 	return 0;
8542 }
8543 
8544 void l2cap_exit(void)
8545 {
8546 	debugfs_remove(l2cap_debugfs);
8547 	hci_unregister_cb(&l2cap_cb);
8548 	l2cap_cleanup_sockets();
8549 }
8550 
8551 module_param(disable_ertm, bool, 0644);
8552 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8553 
8554 module_param(enable_ecred, bool, 0644);
8555 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8556