1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
58 int disable_ertm;
59
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
62
63 static struct workqueue_struct *_busy_wq;
64
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
67 };
68
69 static void l2cap_busy_work(struct work_struct *work);
70
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73
74 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
75
76 /* ---- L2CAP channels ---- */
/* Look up a channel on list @l by destination CID.
 * Caller must hold the channel list lock.  Returns the matching socket
 * or NULL. */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *cur = l->head;

	while (cur && l2cap_pi(cur)->dcid != cid)
		cur = l2cap_pi(cur)->next_c;

	return cur;
}
86
/* Look up a channel on list @l by source CID.
 * Caller must hold the channel list lock.  Returns the matching socket
 * or NULL. */
static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *cur = l->head;

	while (cur && l2cap_pi(cur)->scid != cid)
		cur = l2cap_pi(cur)->next_c;

	return cur;
}
96
97 /* Find channel with given SCID.
98 * Returns locked socket */
/* Find channel with given SCID.
 * Returns locked socket: on success the socket is returned with its
 * bh lock held, so the caller must bh_unlock_sock() when done.  The
 * channel list read lock is only held for the duration of the lookup. */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
109
/* Look up a channel on list @l by the signalling command identifier it
 * last used.  Caller must hold the channel list lock.  Returns the
 * matching socket or NULL. */
static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *cur = l->head;

	while (cur && l2cap_pi(cur)->ident != ident)
		cur = l2cap_pi(cur)->next_c;

	return cur;
}
119
/* Find channel by signalling identifier.
 * On success the socket is returned with its bh lock held (caller must
 * bh_unlock_sock()); the list read lock is dropped before returning. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
130
l2cap_alloc_cid(struct l2cap_chan_list * l)131 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
132 {
133 u16 cid = L2CAP_CID_DYN_START;
134
135 for (; cid < L2CAP_CID_DYN_END; cid++) {
136 if (!__l2cap_get_chan_by_scid(l, cid))
137 return cid;
138 }
139
140 return 0;
141 }
142
/* Insert @sk at the head of channel list @l.
 * Takes a reference on the socket (dropped in l2cap_chan_unlink()).
 * Caller must hold the channel list write lock. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
154
/* Remove @sk from channel list @l and drop the reference taken by
 * __l2cap_chan_link().  Takes the list write lock itself, unlike the
 * link helper. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
171
__l2cap_chan_add(struct l2cap_conn * conn,struct sock * sk,struct sock * parent)172 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
173 {
174 struct l2cap_chan_list *l = &conn->chan_list;
175
176 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
177 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
178
179 conn->disc_reason = 0x13;
180
181 l2cap_pi(sk)->conn = conn;
182
183 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
184 if (conn->hcon->type == LE_LINK) {
185 /* LE connection */
186 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
187 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
188 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
189 } else {
190 /* Alloc CID for connection-oriented socket */
191 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
192 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
193 }
194 } else if (sk->sk_type == SOCK_DGRAM) {
195 /* Connectionless socket */
196 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
197 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
198 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
199 } else {
200 /* Raw socket can send/recv signalling messages only */
201 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
202 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
203 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
204 }
205
206 __l2cap_chan_link(l, sk);
207
208 if (parent)
209 bt_accept_enqueue(parent, sk);
210 }
211
212 /* Delete channel.
213 * Must be called on the locked socket. */
/* Delete channel.
 * Must be called on the locked socket.  Detaches the channel from its
 * connection, marks the socket closed and zapped, notifies the waiter
 * (listening parent or the socket itself) and, for ERTM channels,
 * stops all mode timers and releases retransmission state. */
void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list and drop the hci_conn reference
		 * held while the channel was attached. */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Channel still sat on a listener's accept queue. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		/* Free any outstanding SREJ bookkeeping entries. */
		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
260
l2cap_get_auth_type(struct sock * sk)261 static inline u8 l2cap_get_auth_type(struct sock *sk)
262 {
263 if (sk->sk_type == SOCK_RAW) {
264 switch (l2cap_pi(sk)->sec_level) {
265 case BT_SECURITY_HIGH:
266 return HCI_AT_DEDICATED_BONDING_MITM;
267 case BT_SECURITY_MEDIUM:
268 return HCI_AT_DEDICATED_BONDING;
269 default:
270 return HCI_AT_NO_BONDING;
271 }
272 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
273 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
274 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
275
276 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
277 return HCI_AT_NO_BONDING_MITM;
278 else
279 return HCI_AT_NO_BONDING;
280 } else {
281 switch (l2cap_pi(sk)->sec_level) {
282 case BT_SECURITY_HIGH:
283 return HCI_AT_GENERAL_BONDING_MITM;
284 case BT_SECURITY_MEDIUM:
285 return HCI_AT_GENERAL_BONDING;
286 default:
287 return HCI_AT_NO_BONDING;
288 }
289 }
290 }
291
292 /* Service level security */
l2cap_check_security(struct sock * sk)293 static inline int l2cap_check_security(struct sock *sk)
294 {
295 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
296 __u8 auth_type;
297
298 auth_type = l2cap_get_auth_type(sk);
299
300 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
301 auth_type);
302 }
303
/* Allocate the next signalling command identifier for @conn.
 * Serialized by conn->lock so concurrent callers get distinct idents. */
u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	/* Wrap back to 1; ident 0 is never handed out. */
	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
325
/* Build an L2CAP signalling command and push it to the ACL link.
 * Silently drops the command if the skb cannot be built. */
void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb;
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	skb = l2cap_build_cmd(conn, code, ident, len, data);
	if (!skb)
		return;

	flags = lmp_no_flush_capable(conn->hcon->hdev) ?
			ACL_START_NO_FLUSH : ACL_START;

	hci_send_acl(conn->hcon, skb, flags);
}
343
/* Build and transmit one S-frame on the channel behind @pi.
 * @control carries the requested supervisory bits; the frame-type bit
 * and any pending F/P bits from conn_state are OR-ed in here.  A CRC16
 * FCS is appended when the channel negotiated it. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* basic header + control field */
	u8 flags;

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the trailing FCS */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Consume a pending Final bit, if any. */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Consume a pending Poll bit, if any. */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before the FCS field itself. */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	hci_send_acl(pi->conn->hcon, skb, flags);
}
395
/* Send a Receiver Ready S-frame, or Receiver Not Ready when the local
 * side is busy, acknowledging up to buffer_seq. */
static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
{
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Local receiver busy: ask the peer to stop sending. */
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		control |= L2CAP_SUPER_RCV_NOT_READY;
	} else {
		control |= L2CAP_SUPER_RCV_READY;
	}

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(pi, control);
}
408
__l2cap_no_conn_pending(struct sock * sk)409 static inline int __l2cap_no_conn_pending(struct sock *sk)
410 {
411 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
412 }
413
/* Drive connection establishment for @sk.
 * If the remote feature mask is already known (or being fetched), send
 * a Connect Request once security allows; otherwise first issue an
 * Information Request for the feature mask and arm the info timer. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight: wait for the response. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		/* Ask the remote for its feature mask first. */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
447
/* Non-zero when channel mode @mode is supported by both the remote
 * (@feat_mask) and the local stack (honouring the disable_ertm module
 * parameter). */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;

	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	if (mode == L2CAP_MODE_ERTM)
		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;

	if (mode == L2CAP_MODE_STREAMING)
		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;

	return 0x00;
}
463
/* Send a Disconnect Request for @sk's channel and move the socket to
 * BT_DISCONN.  Pending transmissions are dropped and, for ERTM, all
 * mode timers are stopped first.  @err is stored in sk_err for the
 * caller of the socket API to observe. */
void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
487
488 /* ---- L2CAP connections ---- */
l2cap_conn_start(struct l2cap_conn * conn)489 static void l2cap_conn_start(struct l2cap_conn *conn)
490 {
491 struct l2cap_chan_list *l = &conn->chan_list;
492 struct sock_del_list del, *tmp1, *tmp2;
493 struct sock *sk;
494
495 BT_DBG("conn %p", conn);
496
497 INIT_LIST_HEAD(&del.list);
498
499 read_lock(&l->lock);
500
501 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
502 bh_lock_sock(sk);
503
504 if (sk->sk_type != SOCK_SEQPACKET &&
505 sk->sk_type != SOCK_STREAM) {
506 bh_unlock_sock(sk);
507 continue;
508 }
509
510 if (sk->sk_state == BT_CONNECT) {
511 struct l2cap_conn_req req;
512
513 if (!l2cap_check_security(sk) ||
514 !__l2cap_no_conn_pending(sk)) {
515 bh_unlock_sock(sk);
516 continue;
517 }
518
519 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
520 conn->feat_mask)
521 && l2cap_pi(sk)->conf_state &
522 L2CAP_CONF_STATE2_DEVICE) {
523 tmp1 = kzalloc(sizeof(struct sock_del_list),
524 GFP_ATOMIC);
525 tmp1->sk = sk;
526 list_add_tail(&tmp1->list, &del.list);
527 bh_unlock_sock(sk);
528 continue;
529 }
530
531 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
532 req.psm = l2cap_pi(sk)->psm;
533
534 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
535 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
536
537 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
538 L2CAP_CONN_REQ, sizeof(req), &req);
539
540 } else if (sk->sk_state == BT_CONNECT2) {
541 struct l2cap_conn_rsp rsp;
542 char buf[128];
543 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
544 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
545
546 if (l2cap_check_security(sk)) {
547 if (bt_sk(sk)->defer_setup) {
548 struct sock *parent = bt_sk(sk)->parent;
549 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
550 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
551 parent->sk_data_ready(parent, 0);
552
553 } else {
554 sk->sk_state = BT_CONFIG;
555 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
556 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
557 }
558 } else {
559 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
560 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
561 }
562
563 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
564 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
565
566 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
567 rsp.result != L2CAP_CR_SUCCESS) {
568 bh_unlock_sock(sk);
569 continue;
570 }
571
572 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
573 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
574 l2cap_build_conf_req(sk, buf), buf);
575 l2cap_pi(sk)->num_conf_req++;
576 }
577
578 bh_unlock_sock(sk);
579 }
580
581 read_unlock(&l->lock);
582
583 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
584 bh_lock_sock(tmp1->sk);
585 __l2cap_sock_close(tmp1->sk, ECONNRESET);
586 bh_unlock_sock(tmp1->sk);
587 list_del(&tmp1->list);
588 kfree(tmp1);
589 }
590 }
591
592 /* Find socket with cid and source bdaddr.
593 * Returns closest match, locked.
594 */
l2cap_get_sock_by_scid(int state,__le16 cid,bdaddr_t * src)595 static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
596 {
597 struct sock *s, *sk = NULL, *sk1 = NULL;
598 struct hlist_node *node;
599
600 read_lock(&l2cap_sk_list.lock);
601
602 sk_for_each(sk, node, &l2cap_sk_list.head) {
603 if (state && sk->sk_state != state)
604 continue;
605
606 if (l2cap_pi(sk)->scid == cid) {
607 /* Exact match. */
608 if (!bacmp(&bt_sk(sk)->src, src))
609 break;
610
611 /* Closest match */
612 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
613 sk1 = sk;
614 }
615 }
616 s = node ? sk : sk1;
617 if (s)
618 bh_lock_sock(s);
619 read_unlock(&l2cap_sk_list.lock);
620
621 return s;
622 }
623
/* Handle a new incoming LE link: if a socket is listening on the LE
 * data channel, create a child socket, attach it to @conn and signal
 * the listener.  Note the success path intentionally falls through to
 * the "clean" label, which only unlocks the parent. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct sock *parent, *uninitialized_var(sk);

	BT_DBG("");

	/* Check if we have socket listening on cid */
	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!parent)
		return;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	write_lock_bh(&list->lock);

	/* Keep the underlying link alive while the channel exists. */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	__l2cap_chan_add(conn, sk, parent);

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&list->lock);

clean:
	/* Parent was returned locked by l2cap_get_sock_by_scid(). */
	bh_unlock_sock(parent);
}
667
/* The underlying link came up: mark non connection-oriented channels
 * connected immediately and start L2CAP setup for the rest.  For LE
 * links, also accept a possible incoming LE data channel first. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* LE channels need no further setup once the link is up. */
		if (conn->hcon->type == LE_LINK) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		}

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/dgram channels are ready as soon as the link is. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
702
703 /* Notify sockets that we cannot guaranty reliability anymore */
l2cap_conn_unreliable(struct l2cap_conn * conn,int err)704 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
705 {
706 struct l2cap_chan_list *l = &conn->chan_list;
707 struct sock *sk;
708
709 BT_DBG("conn %p", conn);
710
711 read_lock(&l->lock);
712
713 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
714 if (l2cap_pi(sk)->force_reliable)
715 sk->sk_err = err;
716 }
717
718 read_unlock(&l->lock);
719 }
720
l2cap_info_timeout(unsigned long arg)721 static void l2cap_info_timeout(unsigned long arg)
722 {
723 struct l2cap_conn *conn = (void *) arg;
724
725 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
726 conn->info_ident = 0;
727
728 l2cap_conn_start(conn);
729 }
730
/* Create (or return the existing) L2CAP connection object for @hcon.
 * Returns NULL on allocation failure, or the existing object if one is
 * already attached.  A non-zero @status also short-circuits to the
 * current (possibly NULL) l2cap_data. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* LE links may use a dedicated MTU; fall back to the ACL MTU. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	/* The info (feature mask) exchange only exists on BR/EDR links. */
	if (hcon->type != LE_LINK)
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	conn->disc_reason = 0x13;

	return conn;
}
768
/* Tear down the L2CAP state attached to @hcon: delete every channel
 * (propagating @err to their owners), stop the info timer and free the
 * connection object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
795
l2cap_chan_add(struct l2cap_conn * conn,struct sock * sk,struct sock * parent)796 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
797 {
798 struct l2cap_chan_list *l = &conn->chan_list;
799 write_lock_bh(&l->lock);
800 __l2cap_chan_add(conn, sk, parent);
801 write_unlock_bh(&l->lock);
802 }
803
804 /* ---- Socket interface ---- */
805
806 /* Find socket with psm and source bdaddr.
807 * Returns closest match.
808 */
l2cap_get_sock_by_psm(int state,__le16 psm,bdaddr_t * src)809 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
810 {
811 struct sock *sk = NULL, *sk1 = NULL;
812 struct hlist_node *node;
813
814 read_lock(&l2cap_sk_list.lock);
815
816 sk_for_each(sk, node, &l2cap_sk_list.head) {
817 if (state && sk->sk_state != state)
818 continue;
819
820 if (l2cap_pi(sk)->psm == psm) {
821 /* Exact match. */
822 if (!bacmp(&bt_sk(sk)->src, src))
823 break;
824
825 /* Closest match */
826 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
827 sk1 = sk;
828 }
829 }
830
831 read_unlock(&l2cap_sk_list.lock);
832
833 return node ? sk : sk1;
834 }
835
/* Initiate an outgoing L2CAP connection for @sk.
 * Routes to a local adapter, creates (or reuses) the ACL or LE link,
 * attaches the channel and either completes immediately (raw/dgram on
 * an already-connected link) or kicks off the L2CAP setup sequence.
 * Returns 0 on success or a negative errno. */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(sk);

	/* LE data channel targets an LE link; everything else ACL. */
	if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* Link is already up: raw/dgram channels finish here,
		 * connection-oriented ones start L2CAP setup. */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			if (l2cap_check_security(sk))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
901
/* Block (interruptibly) until every outstanding ERTM frame has been
 * acknowledged or the channel is detached.  Called with the socket
 * locked; the lock is released around each sleep.  Returns 0 on
 * success, or a negative errno on signal or socket error. */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the poll interval after each wakeup. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
932
/* ERTM monitor timer: the peer has not answered our poll.  Retry with
 * another RR/RNR poll until remote_max_tx attempts are exhausted, then
 * disconnect the channel. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
952
/* ERTM retransmission timer: an I-frame went unacknowledged.  Start the
 * poll sequence (monitor timer + RR/RNR with the P-bit) and wait for
 * the peer's F-bit response. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
968
l2cap_drop_acked_frames(struct sock * sk)969 static void l2cap_drop_acked_frames(struct sock *sk)
970 {
971 struct sk_buff *skb;
972
973 while ((skb = skb_peek(TX_QUEUE(sk))) &&
974 l2cap_pi(sk)->unacked_frames) {
975 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
976 break;
977
978 skb = skb_dequeue(TX_QUEUE(sk));
979 kfree_skb(skb);
980
981 l2cap_pi(sk)->unacked_frames--;
982 }
983
984 if (!l2cap_pi(sk)->unacked_frames)
985 del_timer(&l2cap_pi(sk)->retrans_timer);
986 }
987
l2cap_do_send(struct sock * sk,struct sk_buff * skb)988 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
989 {
990 struct l2cap_pinfo *pi = l2cap_pi(sk);
991 struct hci_conn *hcon = pi->conn->hcon;
992 u16 flags;
993
994 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
995
996 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
997 flags = ACL_START_NO_FLUSH;
998 else
999 flags = ACL_START;
1000
1001 hci_send_acl(hcon, skb, flags);
1002 }
1003
/* Transmit every queued frame in streaming mode (fire and forget, no
 * retransmission).  Each frame's control field receives the next tx
 * sequence number, and the trailing CRC16 FCS is rewritten when the
 * channel negotiated it. */
void l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
		/* Stamp the tx sequence number into the control field. */
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers the frame minus its own two bytes. */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(sk, skb);

		/* 6-bit sequence space. */
		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
	}
}
1025
/* Retransmit the single queued I-frame whose sequence number matches
 * @tx_seq (in answer to a SREJ from the peer).  The frame stays on the
 * transmit queue; a clone is sent with refreshed control/FCS fields.
 *
 * Fix over the previous version: skb_clone(GFP_ATOMIC) can fail, and
 * the result was used unchecked — dereferencing NULL under memory
 * pressure.  On failure we now simply skip this retransmission; the
 * retransmission machinery will retry later.
 */
static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;

	skb = skb_peek(TX_QUEUE(sk));
	if (!skb)
		return;

	/* Locate the frame carrying the requested sequence number. */
	do {
		if (bt_cb(skb)->tx_seq == tx_seq)
			break;

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			return;

	} while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));

	if (pi->remote_max_tx &&
			bt_cb(skb)->retries == pi->remote_max_tx) {
		/* Transmission limit reached: give up on the channel. */
		l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
		return;
	}

	tx_skb = skb_clone(skb, GFP_ATOMIC);
	if (!tx_skb)
		return;

	bt_cb(skb)->retries++;

	/* Keep the SAR bits, refresh req/tx sequence numbers and F-bit. */
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
	control &= L2CAP_CTRL_SAR;

	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);

	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* Recompute the FCS over the updated frame. */
		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
	}

	l2cap_do_send(sk, tx_skb);
}
1073
l2cap_ertm_send(struct sock * sk)1074 int l2cap_ertm_send(struct sock *sk)
1075 {
1076 struct sk_buff *skb, *tx_skb;
1077 struct l2cap_pinfo *pi = l2cap_pi(sk);
1078 u16 control, fcs;
1079 int nsent = 0;
1080
1081 if (sk->sk_state != BT_CONNECTED)
1082 return -ENOTCONN;
1083
1084 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1085
1086 if (pi->remote_max_tx &&
1087 bt_cb(skb)->retries == pi->remote_max_tx) {
1088 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1089 break;
1090 }
1091
1092 tx_skb = skb_clone(skb, GFP_ATOMIC);
1093
1094 bt_cb(skb)->retries++;
1095
1096 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1097 control &= L2CAP_CTRL_SAR;
1098
1099 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1100 control |= L2CAP_CTRL_FINAL;
1101 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1102 }
1103 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1104 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1105 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1106
1107
1108 if (pi->fcs == L2CAP_FCS_CRC16) {
1109 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1110 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1111 }
1112
1113 l2cap_do_send(sk, tx_skb);
1114
1115 __mod_retrans_timer();
1116
1117 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1118 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1119
1120 if (bt_cb(skb)->retries == 1)
1121 pi->unacked_frames++;
1122
1123 pi->frames_sent++;
1124
1125 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1126 sk->sk_send_head = NULL;
1127 else
1128 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1129
1130 nsent++;
1131 }
1132
1133 return nsent;
1134 }
1135
l2cap_retransmit_frames(struct sock * sk)1136 static int l2cap_retransmit_frames(struct sock *sk)
1137 {
1138 struct l2cap_pinfo *pi = l2cap_pi(sk);
1139 int ret;
1140
1141 if (!skb_queue_empty(TX_QUEUE(sk)))
1142 sk->sk_send_head = TX_QUEUE(sk)->next;
1143
1144 pi->next_tx_seq = pi->expected_ack_seq;
1145 ret = l2cap_ertm_send(sk);
1146 return ret;
1147 }
1148
/* Acknowledge received frames.  If the local side is busy an RNR is
 * sent; otherwise pending I-frames are flushed first (they carry the
 * acknowledgement in their ReqSeq field) and an explicit RR is only
 * sent when nothing was transmitted. */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* Outgoing I-frames piggyback the ack; no S-frame needed then. */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1169
l2cap_send_srejtail(struct sock * sk)1170 static void l2cap_send_srejtail(struct sock *sk)
1171 {
1172 struct srej_list *tail;
1173 u16 control;
1174
1175 control = L2CAP_SUPER_SELECT_REJECT;
1176 control |= L2CAP_CTRL_FINAL;
1177
1178 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1179 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1180
1181 l2cap_send_sframe(l2cap_pi(sk), control);
1182 }
1183
l2cap_skbuff_fromiovec(struct sock * sk,struct msghdr * msg,int len,int count,struct sk_buff * skb)1184 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1185 {
1186 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1187 struct sk_buff **frag;
1188 int err, sent = 0;
1189
1190 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1191 return -EFAULT;
1192
1193 sent += count;
1194 len -= count;
1195
1196 /* Continuation fragments (no L2CAP header) */
1197 frag = &skb_shinfo(skb)->frag_list;
1198 while (len) {
1199 count = min_t(unsigned int, conn->mtu, len);
1200
1201 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1202 if (!*frag)
1203 return err;
1204 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1205 return -EFAULT;
1206
1207 sent += count;
1208 len -= count;
1209
1210 frag = &(*frag)->next;
1211 }
1212
1213 return sent;
1214 }
1215
/* Build a connectionless (G-frame) PDU: L2CAP header plus 2-byte PSM,
 * followed by the user payload pulled from @msg.  Returns the skb or
 * an ERR_PTR.
 */
struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct l2cap_hdr *lh;
	struct sk_buff *skb;
	const int hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM field */
	int ret, count;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* First skb takes the header plus as much payload as fits into a
	 * single fragment; the rest goes on the frag_list. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &ret);
	if (!skb)
		return ERR_PTR(ret);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	ret = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	return skb;
}
1244
/* Build a basic mode (B-frame) PDU: bare L2CAP header followed by the
 * user payload pulled from @msg.  Returns the skb or an ERR_PTR.
 */
struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct l2cap_hdr *lh;
	struct sk_buff *skb;
	const int hlen = L2CAP_HDR_SIZE;
	int ret, count;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &ret);
	if (!skb)
		return ERR_PTR(ret);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	ret = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	return skb;
}
1272
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field, optional SDU-length field (SAR start frames only), payload,
 * and an optional FCS placeholder.  @control carries the SAR bits;
 * @sdulen is non-zero only for the first segment of a segmented SDU.
 * Returns the skb or an ERR_PTR.
 */
struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct l2cap_hdr *lh;
	struct sk_buff *skb;
	int ret, count;
	int hlen = L2CAP_HDR_SIZE + 2;	/* header + control field */

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* SAR "start" frames carry a 2-byte SDU length field. */
	if (sdulen)
		hlen += 2;

	/* Reserve room for the trailing FCS when CRC16 is in use. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &ret);
	if (!skb)
		return ERR_PTR(ret);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	ret = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	/* FCS placeholder; the real CRC is filled in at transmit time. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1317
/*
 * Segment an outgoing SDU into a START / CONTINUE... / END sequence of
 * I-frame PDUs (Segmentation and Reassembly for ERTM/streaming mode).
 *
 * The first PDU is marked L2CAP_SDU_START and carries the total SDU
 * length; every following PDU carries at most pi->remote_mps payload
 * bytes, marked CONTINUE, with the final one marked END.  All PDUs are
 * built on a private queue and spliced onto the socket TX queue only
 * after the whole SDU segmented successfully, so a mid-way failure
 * leaves the TX queue untouched.
 *
 * Returns the number of bytes queued or a negative errno from PDU
 * creation.
 *
 * NOTE(review): the START frame is always built with remote_mps payload
 * bytes — this assumes callers only take this path when len exceeds
 * remote_mps; confirm against the send path.
 */
int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* First segment: SAR=start, sdulen argument = total SDU length. */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop every segment built so far; nothing has been
			 * queued on the socket yet. */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	/* Commit the complete SDU to the TX queue in one go. */
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1363
l2cap_chan_ready(struct sock * sk)1364 static void l2cap_chan_ready(struct sock *sk)
1365 {
1366 struct sock *parent = bt_sk(sk)->parent;
1367
1368 BT_DBG("sk %p, parent %p", sk, parent);
1369
1370 l2cap_pi(sk)->conf_state = 0;
1371 l2cap_sock_clear_timer(sk);
1372
1373 if (!parent) {
1374 /* Outgoing channel.
1375 * Wake up socket sleeping on connect.
1376 */
1377 sk->sk_state = BT_CONNECTED;
1378 sk->sk_state_change(sk);
1379 } else {
1380 /* Incoming channel.
1381 * Wake up socket sleeping on accept.
1382 */
1383 parent->sk_data_ready(parent, 0);
1384 }
1385 }
1386
1387 /* Copy frame to all raw sockets on that connection */
l2cap_raw_recv(struct l2cap_conn * conn,struct sk_buff * skb)1388 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1389 {
1390 struct l2cap_chan_list *l = &conn->chan_list;
1391 struct sk_buff *nskb;
1392 struct sock *sk;
1393
1394 BT_DBG("conn %p", conn);
1395
1396 read_lock(&l->lock);
1397 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1398 if (sk->sk_type != SOCK_RAW)
1399 continue;
1400
1401 /* Don't send frame to the socket it came from */
1402 if (skb->sk == sk)
1403 continue;
1404 nskb = skb_clone(skb, GFP_ATOMIC);
1405 if (!nskb)
1406 continue;
1407
1408 if (sock_queue_rcv_skb(sk, nskb))
1409 kfree_skb(nskb);
1410 }
1411 read_unlock(&l->lock);
1412 }
1413
1414 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling channel PDU: L2CAP header + command header +
 * @dlen bytes of @data.  If the total exceeds the connection MTU, the
 * payload is split across headerless continuation skbs chained on the
 * first skb's frag_list.  Returns the skb or NULL on allocation
 * failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
						u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	/* Total wire length; the first skb carries at most one MTU. */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signalling CID. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Payload room left in the first skb after both headers. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	/* Bytes still to be carried by continuation fragments. */
	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Freeing the head skb releases the partial frag_list too. */
	kfree_skb(skb);
	return NULL;
}
1477
l2cap_get_conf_opt(void ** ptr,int * type,int * olen,unsigned long * val)1478 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1479 {
1480 struct l2cap_conf_opt *opt = *ptr;
1481 int len;
1482
1483 len = L2CAP_CONF_OPT_SIZE + opt->len;
1484 *ptr += len;
1485
1486 *type = opt->type;
1487 *olen = opt->len;
1488
1489 switch (opt->len) {
1490 case 1:
1491 *val = *((u8 *) opt->val);
1492 break;
1493
1494 case 2:
1495 *val = get_unaligned_le16(opt->val);
1496 break;
1497
1498 case 4:
1499 *val = get_unaligned_le32(opt->val);
1500 break;
1501
1502 default:
1503 *val = (unsigned long) opt->val;
1504 break;
1505 }
1506
1507 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1508 return len;
1509 }
1510
/*
 * Append one configuration option (type, length, value) at *ptr and
 * advance *ptr past it.  1/2/4-byte values are stored little-endian;
 * longer values are copied verbatim from the pointer passed in @val.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	if (len == 1)
		*((u8 *) opt->val) = val;
	else if (len == 2)
		put_unaligned_le16(val, opt->val);
	else if (len == 4)
		put_unaligned_le32(val, opt->val);
	else
		memcpy(opt->val, (void *) val, len);

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
1540
/*
 * Ack timer expired: no outgoing frame has carried the acknowledgement
 * recently, so acknowledge received I-frames explicitly.  Runs in
 * timer (BH) context, hence bh_lock_sock.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
1549
l2cap_ertm_init(struct sock * sk)1550 static inline void l2cap_ertm_init(struct sock *sk)
1551 {
1552 l2cap_pi(sk)->expected_ack_seq = 0;
1553 l2cap_pi(sk)->unacked_frames = 0;
1554 l2cap_pi(sk)->buffer_seq = 0;
1555 l2cap_pi(sk)->num_acked = 0;
1556 l2cap_pi(sk)->frames_sent = 0;
1557
1558 setup_timer(&l2cap_pi(sk)->retrans_timer,
1559 l2cap_retrans_timeout, (unsigned long) sk);
1560 setup_timer(&l2cap_pi(sk)->monitor_timer,
1561 l2cap_monitor_timeout, (unsigned long) sk);
1562 setup_timer(&l2cap_pi(sk)->ack_timer,
1563 l2cap_ack_timeout, (unsigned long) sk);
1564
1565 __skb_queue_head_init(SREJ_QUEUE(sk));
1566 __skb_queue_head_init(BUSY_QUEUE(sk));
1567
1568 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
1569
1570 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1571 }
1572
/* Keep ERTM or streaming mode only when the remote advertises support
 * for it; everything else degrades to basic mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	if ((mode == L2CAP_MODE_STREAMING || mode == L2CAP_MODE_ERTM) &&
			l2cap_mode_supported(mode, remote_feat_mask))
		return mode;

	return L2CAP_MODE_BASIC;
}
1585
/*
 * Build a Configure Request for channel @sk into @data.
 *
 * On the very first exchange (no requests or responses yet) the mode
 * may be downgraded via l2cap_select_mode() unless it was pinned by
 * the user (L2CAP_CONF_STATE2_DEVICE).  MTU, RFC and FCS options are
 * then appended as required by the chosen mode.  Returns the number of
 * bytes written into @data.
 */
int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode fixed by the user: keep it as-is. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	/* Only advertise the MTU when it differs from the default. */
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* No RFC option needed when the remote supports neither
		 * ERTM nor streaming mode. */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		/* Timeouts left 0 in the request; values are set when
		 * answering the peer's request instead. */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		/* Clamp the PDU size so a full I-frame (header, control,
		 * FCS) fits within the ACL MTU. */
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Request "no FCS" when we don't want it or the peer has
		 * already asked to drop it. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1683
l2cap_parse_conf_req(struct sock * sk,void * data)1684 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1685 {
1686 struct l2cap_pinfo *pi = l2cap_pi(sk);
1687 struct l2cap_conf_rsp *rsp = data;
1688 void *ptr = rsp->data;
1689 void *req = pi->conf_req;
1690 int len = pi->conf_len;
1691 int type, hint, olen;
1692 unsigned long val;
1693 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1694 u16 mtu = L2CAP_DEFAULT_MTU;
1695 u16 result = L2CAP_CONF_SUCCESS;
1696
1697 BT_DBG("sk %p", sk);
1698
1699 while (len >= L2CAP_CONF_OPT_SIZE) {
1700 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1701
1702 hint = type & L2CAP_CONF_HINT;
1703 type &= L2CAP_CONF_MASK;
1704
1705 switch (type) {
1706 case L2CAP_CONF_MTU:
1707 mtu = val;
1708 break;
1709
1710 case L2CAP_CONF_FLUSH_TO:
1711 pi->flush_to = val;
1712 break;
1713
1714 case L2CAP_CONF_QOS:
1715 break;
1716
1717 case L2CAP_CONF_RFC:
1718 if (olen == sizeof(rfc))
1719 memcpy(&rfc, (void *) val, olen);
1720 break;
1721
1722 case L2CAP_CONF_FCS:
1723 if (val == L2CAP_FCS_NONE)
1724 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1725
1726 break;
1727
1728 default:
1729 if (hint)
1730 break;
1731
1732 result = L2CAP_CONF_UNKNOWN;
1733 *((u8 *) ptr++) = type;
1734 break;
1735 }
1736 }
1737
1738 if (pi->num_conf_rsp || pi->num_conf_req > 1)
1739 goto done;
1740
1741 switch (pi->mode) {
1742 case L2CAP_MODE_STREAMING:
1743 case L2CAP_MODE_ERTM:
1744 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1745 pi->mode = l2cap_select_mode(rfc.mode,
1746 pi->conn->feat_mask);
1747 break;
1748 }
1749
1750 if (pi->mode != rfc.mode)
1751 return -ECONNREFUSED;
1752
1753 break;
1754 }
1755
1756 done:
1757 if (pi->mode != rfc.mode) {
1758 result = L2CAP_CONF_UNACCEPT;
1759 rfc.mode = pi->mode;
1760
1761 if (pi->num_conf_rsp == 1)
1762 return -ECONNREFUSED;
1763
1764 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1765 sizeof(rfc), (unsigned long) &rfc);
1766 }
1767
1768
1769 if (result == L2CAP_CONF_SUCCESS) {
1770 /* Configure output options and let the other side know
1771 * which ones we don't like. */
1772
1773 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1774 result = L2CAP_CONF_UNACCEPT;
1775 else {
1776 pi->omtu = mtu;
1777 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1778 }
1779 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1780
1781 switch (rfc.mode) {
1782 case L2CAP_MODE_BASIC:
1783 pi->fcs = L2CAP_FCS_NONE;
1784 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1785 break;
1786
1787 case L2CAP_MODE_ERTM:
1788 pi->remote_tx_win = rfc.txwin_size;
1789 pi->remote_max_tx = rfc.max_transmit;
1790
1791 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1792 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1793
1794 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1795
1796 rfc.retrans_timeout =
1797 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1798 rfc.monitor_timeout =
1799 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1800
1801 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1802
1803 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1804 sizeof(rfc), (unsigned long) &rfc);
1805
1806 break;
1807
1808 case L2CAP_MODE_STREAMING:
1809 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1810 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1811
1812 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1813
1814 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1815
1816 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1817 sizeof(rfc), (unsigned long) &rfc);
1818
1819 break;
1820
1821 default:
1822 result = L2CAP_CONF_UNACCEPT;
1823
1824 memset(&rfc, 0, sizeof(rfc));
1825 rfc.mode = pi->mode;
1826 }
1827
1828 if (result == L2CAP_CONF_SUCCESS)
1829 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1830 }
1831 rsp->scid = cpu_to_le16(pi->dcid);
1832 rsp->result = cpu_to_le16(result);
1833 rsp->flags = cpu_to_le16(0x0000);
1834
1835 return ptr - data;
1836 }
1837
/*
 * Process the option list of a Configure Response that rejected some
 * of our parameters and build an adjusted Configure Request into
 * @data.  *result may be updated by the option handling.
 *
 * Returns the length of the new request, or -ECONNREFUSED when the
 * proposed mode cannot be accepted.
 */
static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	/* Default to the current mode so rfc.mode is never read
	 * uninitialized when the response carries no RFC option. */
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };

	BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Never go below the minimum MTU; tell the peer. */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				pi->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				pi->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, pi->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* User-pinned mode must not be renegotiated. */
			if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
							rfc.mode != pi->mode)
				return -ECONNREFUSED;

			pi->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;
		}
	}

	if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
		return -ECONNREFUSED;

	pi->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS) {
		/* Latch the negotiated ERTM/streaming parameters. */
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
			break;
		case L2CAP_MODE_STREAMING:
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
1906
/*
 * Fill in a bare Configure Response header (no options) for @sk with
 * the given result and continuation flags.  Returns the response
 * length.
 */
static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *end = rsp->data;

	BT_DBG("sk %p", sk);

	rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	return end - data;
}
1920
l2cap_conf_rfc_get(struct sock * sk,void * rsp,int len)1921 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1922 {
1923 struct l2cap_pinfo *pi = l2cap_pi(sk);
1924 int type, olen;
1925 unsigned long val;
1926 struct l2cap_conf_rfc rfc;
1927
1928 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
1929
1930 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1931 return;
1932
1933 while (len >= L2CAP_CONF_OPT_SIZE) {
1934 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1935
1936 switch (type) {
1937 case L2CAP_CONF_RFC:
1938 if (olen == sizeof(rfc))
1939 memcpy(&rfc, (void *)val, olen);
1940 goto done;
1941 }
1942 }
1943
1944 done:
1945 switch (rfc.mode) {
1946 case L2CAP_MODE_ERTM:
1947 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1948 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1949 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1950 break;
1951 case L2CAP_MODE_STREAMING:
1952 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1953 }
1954 }
1955
/* Handle a Command Reject.  A reject of our pending Information
 * Request means the peer does not understand it: stop waiting for the
 * feature mask and start connecting channels immediately.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;

	/* Only "command not understood" (reason 0x0000) is of interest. */
	if (rej->reason != 0x0000)
		return 0;

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) ||
			cmd->ident != conn->info_ident)
		return 0;

	del_timer(&conn->info_timer);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);

	return 0;
}
1975
/*
 * Handle an incoming Connection Request.
 *
 * Looks up a listening socket for the requested PSM, performs the link
 * security check, allocates and initializes the child socket, and
 * always answers with a Connection Response (success, pending, or a
 * rejection).  When the remote feature mask is not yet known, the
 * response is "pending" and an Information Request is started first.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Child socket inherits settings from the listening parent. */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	/* Our local CID for this channel becomes the peer's destination. */
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace decides via accept(): answer
				 * "pending / authorization pending". */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedures still in progress. */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Still waiting for the remote feature mask. */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off feature-mask discovery; the connection completes
		 * when the Information Response arrives. */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		/* Connection accepted immediately: start configuration. */
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return 0;
}
2103
/*
 * Handle a Connection Response for one of our outgoing connection
 * attempts.  The channel is found by our source CID, or by the command
 * ident when scid is 0 (the peer rejected before allocating a CID).
 * Both lookup helpers return with the socket bh-locked; it is unlocked
 * at the end of this function.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Channel open: record the peer's CID and move on to the
		 * configuration phase. */
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Configure Request may already have gone out. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		/* Peer is still authenticating/authorizing: keep waiting. */
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2165
set_default_fcs(struct l2cap_pinfo * pi)2166 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2167 {
2168 /* FCS is enabled only in ERTM or streaming mode, if one or both
2169 * sides request it.
2170 */
2171 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2172 pi->fcs = L2CAP_FCS_NONE;
2173 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2174 pi->fcs = L2CAP_FCS_CRC16;
2175 }
2176
/*
 * Handle a Configure Request for one of our channels.
 *
 * Option lists may be split across several requests (continuation flag
 * 0x0001); partial data is accumulated in pi->conf_req until the final
 * fragment arrives, then parsed and answered.  Once both directions
 * are configured the channel becomes connected.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the socket bh-locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state != BT_CONFIG) {
		/* Channel not in the configuration phase: reject with
		 * "invalid CID" (reason 0x0002). */
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Mode negotiation failed: tear the channel down. */
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: the channel is usable. */
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		/* We have not sent our own Configure Request yet: do so now. */
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2267
/*
 * Handle a Configure Response.  On success latch the negotiated RFC
 * parameters; on "unacceptable parameters" retry with an adjusted
 * request (up to L2CAP_CONF_MAX_CONF_RSP attempts); anything else
 * tears the channel down.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;
	/* cmd->len is little-endian on the wire and must be converted
	 * before arithmetic (using it raw was wrong on big-endian). */
	int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	/* Returns with the socket bh-locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(sk, rsp->data, len);
		break;

	case L2CAP_CONF_UNACCEPT:
		if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* Reject overlong option lists before parsing to
			 * keep the on-stack request buffer safe. */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(sk, rsp->data,
							len, req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			l2cap_pi(sk)->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - too many retries, give up */

	default:
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto done;
	}

	if (flags & 0x01)
		goto done;

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		/* Both directions configured: the channel is usable. */
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;
		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
2346
/* Handle an incoming L2CAP Disconnect Request: acknowledge it with a
 * Disconnect Response and tear the channel down (or defer teardown to
 * the socket timer if userspace currently owns the socket lock).
 * Always returns 0.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *dreq = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	struct l2cap_pinfo *pi;
	struct sock *sk;
	u16 dcid, scid;

	scid = __le16_to_cpu(dreq->scid);
	dcid = __le16_to_cpu(dreq->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer addresses us by its dcid, which is our source CID; the
	 * lookup returns the socket bh-locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	pi = l2cap_pi(sk);

	/* Echo both channel endpoints back in the response. */
	rsp.dcid = cpu_to_le16(pi->scid);
	rsp.scid = cpu_to_le16(pi->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sock_owned_by_user(sk)) {
		/* don't delete l2cap channel if sk is owned by user:
		 * reschedule teardown via the socket timer instead. */
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2384
/* Handle an incoming L2CAP Disconnect Response: the peer confirmed our
 * Disconnect Request, so delete the channel (or defer if userspace
 * holds the socket lock).  Always returns 0.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	struct sock *sk;
	u16 dcid = __le16_to_cpu(rsp->dcid);
	u16 scid = __le16_to_cpu(rsp->scid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lookup by our source CID; returns the socket bh-locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	if (sock_owned_by_user(sk)) {
		/* don't delete l2cap channel if sk is owned by user:
		 * let the socket timer finish the job shortly. */
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2415
/* Handle an incoming L2CAP Information Request and answer it with the
 * requested data: the extended feature mask, the fixed channel bitmap,
 * or a "not supported" result for anything else.  Always returns 0.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 32-bit feature mask */
		u8 buf[8];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		u32 feat_mask = l2cap_feat_mask;

		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							| L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP,
							sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP,
							sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;

		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP,
							sizeof(rsp), &rsp);
	}

	return 0;
}
2455
/* Handle an incoming L2CAP Information Response.  A feature-mask answer
 * may trigger a follow-up fixed-channel request; once the exchange is
 * complete the deferred connection setup continues via
 * l2cap_conn_start().  Always returns 0.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type = __le16_to_cpu(rsp->type);
	u16 result = __le16_to_cpu(rsp->result);
	bool finished = false;

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer can't answer; proceed without the information. */
		finished = true;
	} else if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Ask for the fixed channel bitmap before
			 * declaring the information exchange done. */
			struct l2cap_info_req req;

			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
			conn->info_ident = l2cap_get_ident(conn);
			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			finished = true;
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		finished = true;
	}

	if (finished) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;
		l2cap_conn_start(conn);
	}

	return 0;
}
2503
/* Validate a requested set of LE connection parameters (interval range,
 * slave latency and supervision timeout multiplier) against the limits
 * allowed by the spec.  Returns 0 if acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
						u16 to_multiplier)
{
	unsigned int window;
	u16 max_latency;

	/* Connection interval: 6..3200, non-inverted range. */
	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	/* Supervision timeout multiplier: 10..3200. */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* The supervision window must strictly exceed the max interval. */
	window = to_multiplier * 8;
	if (max >= window)
		return -EINVAL;

	/* Latency must fit inside the supervision window and never
	 * exceed the spec maximum of 499. */
	max_latency = (window / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
2524
/* Handle an LE Connection Parameter Update Request from the slave.
 * Sends an accept/reject response and, on accept, asks the controller
 * to apply the new parameters.  Returns 0 on success, -EINVAL if we are
 * not master, -EPROTO on a malformed request.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the connection master can be asked to update parameters. */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	rsp.result = err ? cpu_to_le16(L2CAP_CONN_PARAM_REJECTED)
			 : cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Only push accepted parameters down to the controller. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2566
/* Dispatch one BR/EDR signalling command to its handler.  A non-zero
 * return makes the caller (l2cap_sig_channel) answer with an L2CAP
 * Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int ret = 0;

	switch (cmd->code) {
	case L2CAP_CONN_REQ:
		ret = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		ret = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		ret = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		ret = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		ret = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		ret = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_INFO_REQ:
		ret = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		ret = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_COMMAND_REJ:
		/* Peer rejected one of our commands; nothing to answer. */
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Mirror the payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		ret = -EINVAL;
		break;
	}

	return ret;
}
2624
/* Dispatch one LE signalling command.  Only the Connection Parameter
 * Update Request does any work; rejects and update responses are
 * silently accepted.  Unknown commands are rejected with -EINVAL so the
 * caller sends a Command Reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	if (cmd->code == L2CAP_CONN_PARAM_UPDATE_REQ)
		return l2cap_conn_param_update_req(conn, cmd, data);

	if (cmd->code == L2CAP_COMMAND_REJ ||
			cmd->code == L2CAP_CONN_PARAM_UPDATE_RSP)
		return 0;

	BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
	return -EINVAL;
}
2643
/* Process an incoming signalling-channel skb.  One skb may carry
 * several concatenated commands; each header is copied out and the
 * command dispatched to the BR/EDR or LE handler depending on the link
 * type.  A failing handler is answered with an L2CAP Command Reject.
 * Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Presumably forwards signalling traffic to raw listeners --
	 * TODO confirm l2cap_raw_recv() semantics. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command claiming more payload than remains, or one
		 * with the reserved ident 0, means the frame is corrupt:
		 * stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next concatenated command. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
2689
l2cap_check_fcs(struct l2cap_pinfo * pi,struct sk_buff * skb)2690 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2691 {
2692 u16 our_fcs, rcv_fcs;
2693 int hdr_size = L2CAP_HDR_SIZE + 2;
2694
2695 if (pi->fcs == L2CAP_FCS_CRC16) {
2696 skb_trim(skb, skb->len - 2);
2697 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2698 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2699
2700 if (our_fcs != rcv_fcs)
2701 return -EBADMSG;
2702 }
2703 return 0;
2704 }
2705
/* Answer the peer after a poll: transmit pending I-frames if possible,
 * otherwise an explicit RR or RNR S-frame, so that an acknowledgement
 * carrying our current buffer_seq always goes out.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	/* Reset the counter so we can tell below whether l2cap_ertm_send()
	 * actually put any I-frames on the air. */
	pi->frames_sent = 0;

	/* Acknowledge everything received so far. */
	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* We cannot accept more data: advertise Receiver Not
		 * Ready and remember that we did. */
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* No I-frame went out and we are not busy: acknowledge with an
	 * explicit Receiver Ready instead. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
2732
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by each frame's distance from buffer_seq in the
 * modulo-64 sequence space.  Returns -EINVAL if a frame with the same
 * tx_seq is already queued (duplicate), 0 otherwise.
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	/* Stash sequencing metadata in the skb control block for later
	 * reassembly in l2cap_check_srej_gap(). */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Offset of the new frame from the reassembly window start,
	 * normalised into [0, 63]. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame lying beyond the new one marks the
		 * insertion point. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Largest offset seen so far: append at the tail. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
2775
/* Reassemble an ERTM SDU from one received I-frame according to its SAR
 * (Segmentation And Reassembly) bits and deliver complete SDUs to the
 * socket receive queue.
 *
 * Returns -ENOMEM when allocation or queueing fails in a way the caller
 * must handle by entering local-busy state (the SAR_RETRY flag makes a
 * re-pushed END frame skip the copy it already performed); returns 0
 * otherwise.  Inconsistent SAR state tears the channel down.
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* A segmented SDU must not be in progress. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		/* NOTE(review): a failed sock_queue_rcv_skb() here falls
		 * through to the final kfree_skb()/return 0, silently
		 * dropping the frame, unlike the SDU_END path which
		 * propagates the error -- confirm this is intended. */
		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* The first two bytes of a START frame carry the total
		 * SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* Reject segments that would exceed the announced SDU
		 * length before appending. */
		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retried END (after -ENOMEM below) the data was
		 * already copied; skip straight to delivery. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		/* NOTE(review): pi->sdu is freed but not set to NULL here;
		 * the cleared SAR_SDU flag presumably guards reuse --
		 * verify no path reads pi->sdu with the flag clear. */
		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

	/* fall through: an inconsistent SAR state tears the channel down */
disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
2883
/* Try to leave the receive-side local-busy state by flushing the
 * BUSY_QUEUE through SDU reassembly.  Returns 0 when the queue drained
 * completely (the busy condition is then cleared), -EBUSY if a frame
 * still cannot be delivered.
 */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still busy: put the frame back for a later
			 * retry and report it. */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		/* Frame delivered: advance the reassembly window. */
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We previously told the peer we were busy (RNR): poll it with
	 * RR+P so it resumes, and switch from the retransmission timer to
	 * the monitor timer while waiting for the F-bit answer. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
2923
/* Deferred-work handler for the receive-side busy condition.  Runs in
 * process context and periodically retries l2cap_try_push_rx_skb()
 * until the BUSY_QUEUE drains, the socket errors out, the task is
 * signalled, or L2CAP_LOCAL_BUSY_TRIES attempts have failed -- in which
 * case the channel is disconnected with EBUSY.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the socket lock so the receive path can
		 * make progress in the meantime. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}
	/* NOTE(review): err is assigned on the exit paths above but never
	 * read after the loop -- looks vestigial; confirm before touching. */

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
2970
/* Feed an in-sequence I-frame into SDU reassembly.  When the receive
 * queue cannot take more data, enter the local busy state: queue the
 * frame, tell the peer Receiver Not Ready and kick the busy worker.
 * Returns the reassembly result (negative once busy).
 */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 rnr_control;
	int err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Already busy: stash the frame and retry the drain. */
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return l2cap_try_push_rx_skb(sk);
	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);

	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* Advertise Receiver Not Ready with our current window position. */
	rnr_control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	rnr_control |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, rnr_control);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	del_timer(&pi->ack_timer);

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3009
/* Reassemble a Streaming Mode SDU from one received I-frame according
 * to its SAR bits and deliver complete SDUs to the socket receive
 * queue.  Streaming Mode has no retransmission, so inconsistent or
 * oversized segments just discard the partial SDU instead of tearing
 * the channel down.  Consumes skb.  Returns 0 on delivery/progress, a
 * negative errno on a dropped or failed segment.
 */
static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err = -EINVAL;

	/*
	 * TODO: We have to notify the userland if some data is lost with the
	 * Streaming Mode.
	 */

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			/* A segmented SDU was in flight; abandon it.  Also
			 * reset the SAR state, otherwise a later segment
			 * would write into the freed pi->sdu. */
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
			pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
			break;
		}

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return 0;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			/* Previous SDU never finished: drop it and clear
			 * the stale pointer/flag (was a use-after-free). */
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
			pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
			break;
		}

		/* First two bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (pi->sdu_len > pi->imtu) {
			err = -EMSGSIZE;
			break;
		}

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu) {
			err = -ENOMEM;
			break;
		}

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		err = 0;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		/* Bounds-check BEFORE appending: skb_put() past the
		 * allocated SDU would trigger skb_over_panic(). */
		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
			pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
			break;
		}

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		err = 0;
		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len += skb->len;

		if (pi->partial_sdu_len > pi->imtu)
			goto drop;

		/* Would overflow the buffer sized from sdu_len: discard
		 * rather than let skb_put() panic. */
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		if (pi->partial_sdu_len == pi->sdu_len) {
			/* skb_clone() can fail under GFP_ATOMIC; skip
			 * delivery rather than dereference NULL. */
			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
			if (_skb) {
				err = sock_queue_rcv_skb(sk, _skb);
				if (err < 0)
					kfree_skb(_skb);
			}
		}
		err = 0;

drop:
		kfree_skb(pi->sdu);
		pi->sdu = NULL;
		break;
	}

	kfree_skb(skb);
	return err;
}
3103
/* After a missing frame arrived, drain the head of the SREJ queue for
 * as long as the queued frames continue the sequence, pushing each one
 * through ERTM reassembly and advancing the SREJ reassembly window.
 */
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *head;

	for (;;) {
		u16 sar_control;

		head = skb_peek(SREJ_QUEUE(sk));
		if (!head || bt_cb(head)->tx_seq != tx_seq)
			break;

		head = skb_dequeue(SREJ_QUEUE(sk));
		sar_control = bt_cb(head)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_ertm_reassembly_sdu(sk, head, sar_control);

		pi->buffer_seq_srej = (pi->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
3121
/* Walk the pending-SREJ list: the entry matching tx_seq is satisfied
 * and removed; every entry encountered before it gets its SREJ frame
 * re-sent and is rotated to the tail of the list.
 */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *entry, *next;
	u16 ctrl;

	list_for_each_entry_safe(entry, next, SREJ_LIST(sk), list) {
		if (entry->tx_seq == tx_seq) {
			/* Requested frame arrived: retire this entry. */
			list_del(&entry->list);
			kfree(entry);
			return;
		}

		ctrl = L2CAP_SUPER_SELECT_REJECT;
		ctrl |= entry->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, ctrl);

		/* Still outstanding: rotate it to the end of the list. */
		list_move_tail(&entry->list, SREJ_LIST(sk));
	}
}
3141
/* Send a Selective Reject for every sequence number between
 * expected_tx_seq and the tx_seq that actually arrived, recording each
 * requested frame in the SREJ list, then advance expected_tx_seq past
 * the received frame.
 */
static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *new;
	u16 control;

	while (tx_seq != pi->expected_tx_seq) {
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);

		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
		/* GFP_ATOMIC can fail: bail out instead of dereferencing
		 * NULL.  Recovery is degraded (the un-tracked frames will
		 * be SREJ'ed again on the next out-of-sequence frame), but
		 * we no longer oops under memory pressure. */
		if (!new)
			return;
		new->tx_seq = pi->expected_tx_seq;
		pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		list_add_tail(&new->list, SREJ_LIST(sk));
	}
	/* Skip over the frame we just received. */
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
}
3160
/* Process an incoming ERTM I-frame.
 *
 * The piggybacked ReqSeq acknowledges the peer's received frames.  An
 * in-sequence TxSeq is pushed to reassembly; an out-of-sequence one
 * enters (or extends) selective-reject recovery; a TxSeq outside the
 * transmit window disconnects the channel.  Always returns 0; the skb
 * is consumed on all paths.
 */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int tx_seq_offset, expected_tx_seq_offset;
	/* Acknowledge roughly every sixth frame of the window. */
	int num_to_ack = (pi->tx_win/6) + 1;
	int err = 0;

	BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
								rx_control);

	/* An F-bit answering our poll ends the WAIT_F state and re-arms
	 * the retransmission timer if frames are still unacknowledged. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&pi->monitor_timer);
		if (pi->unacked_frames > 0)
			__mod_retrans_timer();
		pi->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	/* Piggybacked acknowledgement: release everything up to req_seq. */
	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= pi->tx_win) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	/* While locally busy, drop out-of-sequence frames.
	 * FIX: conn_state is a bitmask -- test the LOCAL_BUSY bit instead
	 * of comparing the whole word for equality, which only matched
	 * when no other state bit happened to be set. */
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
		goto drop;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* Oldest missing frame arrived: buffer it and see
			 * how far the gap can now be closed. */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(SREJ_LIST(sk))) {
				/* Recovery complete: resume normal
				 * operation from the SREJ window. */
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
				l2cap_send_ack(pi);
				BT_DBG("sk %p, Exit SREJ_SENT", sk);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
				goto drop;

			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(pi->expected_tx_seq - pi->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* First gap detected: enter selective-reject recovery. */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		BT_DBG("sk %p, Enter SREJ", sk);

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		__skb_queue_head_init(BUSY_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);

		del_timer(&pi->ack_timer);
	}
	return 0;

expected:
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* In-sequence relative to recovery: park it in the SREJ
		 * queue until the gap closes. */
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	err = l2cap_push_rx_skb(sk, skb, rx_control);
	if (err < 0)
		return 0;

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	}

	__mod_ack_timer();

	pi->num_acked = (pi->num_acked + 1) % num_to_ack;
	if (pi->num_acked == num_to_ack - 1)
		l2cap_send_ack(pi);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3296
/* Handle a Receiver Ready (RR) S-frame.  The ReqSeq acknowledges the
 * peer's received frames; the P and F bits drive the poll/final
 * handshake used for retransmission recovery.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	/* Release every pending frame this RR acknowledges. */
	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: our answer must carry the F bit. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Answer to an earlier poll of ours: retransmit unless a
		 * REJ already triggered the retransmission (REJ_ACT). */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		/* Plain RR: peer is no longer busy; resume sending. */
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(pi);
		else
			l2cap_ertm_send(sk);
	}
}
3340
/* Handle a Reject (REJ) S-frame: the peer asks for retransmission of
 * everything from ReqSeq onward.  The REJ_ACT flag prevents a second
 * retransmission when the F-bit answer to our own poll arrives later.
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 req_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, req_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Everything before req_seq is implicitly acknowledged. */
	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (!(rx_control & L2CAP_CTRL_FINAL)) {
		l2cap_retransmit_frames(sk);
		/* If a poll of ours is outstanding, suppress the second
		 * retransmission when its F-bit answer comes back. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
		return;
	}

	/* F-bit set: only retransmit if a previous REJ did not already. */
	if (pi->conn_state & L2CAP_CONN_REJ_ACT)
		pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
	else
		l2cap_retransmit_frames(sk);
}
/* Handle a Selective Reject (SREJ) S-frame asking for retransmission of
 * the single I-frame with TxSeq == ReqSeq.  The srej_save_reqseq /
 * SREJ_ACT pair suppresses a duplicate retransmission when the F-bit
 * answer to our poll echoes the sequence number we already resent.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P: ack up to tx_seq, resend the requested
		 * frame (our answer carries the F bit) and keep sending. */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit answer: skip the resend if we already answered
		 * this very sequence number during the poll exchange. */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3401
/* Handle a Receiver Not Ready (RNR) S-frame: the peer cannot accept
 * more I-frames.  Mark it busy, absorb the piggybacked acknowledgement
 * and answer any poll; when SREJ recovery is active the outstanding
 * SREJs are re-advertised instead.
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 req_seq = __get_reqseq(rx_control);
	bool poll = rx_control & L2CAP_CTRL_POLL;

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, req_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (poll)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Nothing will be acknowledged while the peer is busy, so
		 * stop the retransmission timer; answer a poll with F=1. */
		del_timer(&pi->retrans_timer);
		if (poll)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	if (poll)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
3428
/*
 * Dispatch a received ERTM S-frame (supervisory frame) to the handler
 * matching its supervise bits: RR, REJ, SREJ or RNR.
 *
 * If the frame carries the F bit while we are in WAIT_F (we sent a
 * poll earlier), the monitor timer is stopped and the retransmission
 * timer is re-armed when unacked I-frames remain.
 * The skb holds no payload by this point and is freed here.
 * Always returns 0.
 */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&l2cap_pi(sk)->monitor_timer);
		if (l2cap_pi(sk)->unacked_frames > 0)
			__mod_retrans_timer();
		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(sk, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(sk, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
3462
l2cap_ertm_data_rcv(struct sock * sk,struct sk_buff * skb)3463 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3464 {
3465 struct l2cap_pinfo *pi = l2cap_pi(sk);
3466 u16 control;
3467 u8 req_seq;
3468 int len, next_tx_seq_offset, req_seq_offset;
3469
3470 control = get_unaligned_le16(skb->data);
3471 skb_pull(skb, 2);
3472 len = skb->len;
3473
3474 /*
3475 * We can just drop the corrupted I-frame here.
3476 * Receiver will miss it and start proper recovery
3477 * procedures and ask retransmission.
3478 */
3479 if (l2cap_check_fcs(pi, skb))
3480 goto drop;
3481
3482 if (__is_sar_start(control) && __is_iframe(control))
3483 len -= 2;
3484
3485 if (pi->fcs == L2CAP_FCS_CRC16)
3486 len -= 2;
3487
3488 if (len > pi->mps) {
3489 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3490 goto drop;
3491 }
3492
3493 req_seq = __get_reqseq(control);
3494 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
3495 if (req_seq_offset < 0)
3496 req_seq_offset += 64;
3497
3498 next_tx_seq_offset =
3499 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
3500 if (next_tx_seq_offset < 0)
3501 next_tx_seq_offset += 64;
3502
3503 /* check for invalid req-seq */
3504 if (req_seq_offset > next_tx_seq_offset) {
3505 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3506 goto drop;
3507 }
3508
3509 if (__is_iframe(control)) {
3510 if (len < 0) {
3511 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3512 goto drop;
3513 }
3514
3515 l2cap_data_channel_iframe(sk, control, skb);
3516 } else {
3517 if (len != 0) {
3518 BT_ERR("%d", len);
3519 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3520 goto drop;
3521 }
3522
3523 l2cap_data_channel_sframe(sk, control, skb);
3524 }
3525
3526 return 0;
3527
3528 drop:
3529 kfree_skb(skb);
3530 return 0;
3531 }
3532
/*
 * Deliver an inbound L2CAP frame on a connection-oriented channel.
 *
 * Looks up the channel by source CID (the lookup also locks the
 * socket; the unlock happens at 'done') and hands the frame to the
 * mode-specific receive path: basic mode queues straight to the
 * socket, ERTM goes through l2cap_ertm_data_rcv() or the socket
 * backlog when the socket is owned by user context, and streaming
 * mode parses the control field inline.  Undeliverable frames are
 * freed.  Always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		/* A streaming-mode PDU starts with a 2-byte control
		 * field; drop runt frames before the unaligned read
		 * below can run past the end of the buffer. */
		if (skb->len < 2)
			goto drop;

		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* SAR-start frames carry a 2-byte SDU length and a
		 * CRC16 FCS a 2-byte trailer; neither is payload. */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming mode never retransmits: on a sequence gap
		 * just resync expected_tx_seq (modulo 64). */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3620
/*
 * Deliver a connectionless (group) L2CAP frame to the socket bound to
 * @psm on the local address of @conn.  The frame is queued only when a
 * matching socket exists, is in BT_BOUND/BT_CONNECTED state and its
 * incoming MTU can hold the payload; otherwise it is freed.
 * Always returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk = l2cap_get_sock_by_psm(0, psm, conn->src);

	if (!sk) {
		kfree_skb(skb);
		return 0;
	}

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if ((sk->sk_state == BT_BOUND || sk->sk_state == BT_CONNECTED) &&
			l2cap_pi(sk)->imtu >= skb->len &&
			!sock_queue_rcv_skb(sk, skb))
		goto unlock;

	kfree_skb(skb);

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3650
l2cap_recv_frame(struct l2cap_conn * conn,struct sk_buff * skb)3651 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3652 {
3653 struct l2cap_hdr *lh = (void *) skb->data;
3654 u16 cid, len;
3655 __le16 psm;
3656
3657 skb_pull(skb, L2CAP_HDR_SIZE);
3658 cid = __le16_to_cpu(lh->cid);
3659 len = __le16_to_cpu(lh->len);
3660
3661 if (len != skb->len) {
3662 kfree_skb(skb);
3663 return;
3664 }
3665
3666 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3667
3668 switch (cid) {
3669 case L2CAP_CID_LE_SIGNALING:
3670 case L2CAP_CID_SIGNALING:
3671 l2cap_sig_channel(conn, skb);
3672 break;
3673
3674 case L2CAP_CID_CONN_LESS:
3675 psm = get_unaligned_le16(skb->data);
3676 skb_pull(skb, 2);
3677 l2cap_conless_channel(conn, psm, skb);
3678 break;
3679
3680 default:
3681 l2cap_data_channel(conn, cid, skb);
3682 break;
3683 }
3684 }
3685
3686 /* ---- L2CAP interface with lower layer (HCI) ---- */
3687
/*
 * HCI connect-indication hook: decide whether an incoming ACL
 * connection from @bdaddr should be accepted.
 *
 * Scans listening L2CAP sockets and accumulates link-mode flags
 * (accept, and master when the socket requested role switch).
 * Sockets bound exactly to the local adapter address take precedence
 * over wildcard (BDADDR_ANY) listeners.  Returns the link-mode mask,
 * 0 when nobody listens, or -EINVAL for non-ACL links.
 */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct sock *sk;
	struct hlist_node *node;
	int exact = 0, lm1 = 0, lm2 = 0;

	if (type != ACL_LINK)
		return -EINVAL;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		int *lm;

		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm = &lm1;	/* exact local-address match */
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm = &lm2;	/* wildcard listener */
		} else {
			continue;
		}

		*lm |= HCI_LM_ACCEPT;
		if (l2cap_pi(sk)->role_switch)
			*lm |= HCI_LM_MASTER;
	}
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
}
3720
/*
 * HCI connect-confirmation hook.  On success an L2CAP connection
 * object is attached to @hcon and announced as ready; on failure any
 * existing L2CAP state is torn down with the mapped error code.
 * Returns -EINVAL for link types other than ACL/LE, otherwise 0.
 */
static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return -EINVAL;

	if (status) {
		l2cap_conn_del(hcon, bt_err(status));
		return 0;
	}

	conn = l2cap_conn_add(hcon, status);
	if (conn)
		l2cap_conn_ready(conn);

	return 0;
}
3739
l2cap_disconn_ind(struct hci_conn * hcon)3740 static int l2cap_disconn_ind(struct hci_conn *hcon)
3741 {
3742 struct l2cap_conn *conn = hcon->l2cap_data;
3743
3744 BT_DBG("hcon %p", hcon);
3745
3746 if (hcon->type != ACL_LINK || !conn)
3747 return 0x13;
3748
3749 return conn->disc_reason;
3750 }
3751
/*
 * HCI disconnect-confirmation hook: tear down the L2CAP connection
 * attached to @hcon, translating the HCI @reason into an errno.
 * Returns -EINVAL for link types other than ACL/LE, otherwise 0.
 */
static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return -EINVAL;

	l2cap_conn_del(hcon, bt_err(reason));
	return 0;
}
3763
/*
 * React to an encryption change on a SEQPACKET/STREAM L2CAP socket.
 *
 * When encryption was switched off: a MEDIUM-security channel gets a
 * 5-second grace timer (restarting any pending one) before it is torn
 * down elsewhere, while a HIGH-security channel is closed immediately
 * with ECONNREFUSED.  When encryption is on, a MEDIUM-security
 * channel's grace timer is cancelled.  Other socket types are ignored.
 */
static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
		return;

	if (encrypt) {
		if (pi->sec_level == BT_SECURITY_MEDIUM)
			l2cap_sock_clear_timer(sk);
		return;
	}

	switch (pi->sec_level) {
	case BT_SECURITY_MEDIUM:
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ * 5);
		break;
	case BT_SECURITY_HIGH:
		__l2cap_sock_close(sk, ECONNREFUSED);
		break;
	default:
		break;
	}
}
3780
/*
 * HCI security (authentication/encryption) completion callback.
 *
 * Walks every channel on @hcon's L2CAP connection and reacts to the
 * outcome @status of the security procedure:
 *  - channels still waiting for a connect response (CONNECT_PEND)
 *    are skipped;
 *  - established channels (BT_CONNECTED/BT_CONFIG) only re-check
 *    their encryption requirement via l2cap_check_encryption();
 *  - outgoing channels in BT_CONNECT send the Connect Request that
 *    was deferred pending security, or on failure arm a short timer
 *    so the channel is torn down;
 *  - incoming channels in BT_CONNECT2 answer the pending Connect
 *    Request with success or "security block".
 * Each socket is handled under its own bh lock while the channel
 * list read lock is held.  Always returns 0.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* connect response still outstanding - nothing to do yet */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* security done - send the deferred
				 * Connect Request now */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* security failed - short timer tears
				 * the channel down */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* incoming channel waiting on security: answer
			 * the peer's Connect Request accordingly */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
3854
/*
 * Entry point for ACL data handed up by the HCI core.
 *
 * Reassembles L2CAP frames that arrived fragmented over HCI: a start
 * fragment (ACL_CONT clear) announces the total frame length in the
 * basic L2CAP header; continuation fragments are appended to
 * conn->rx_skb until conn->rx_len reaches zero, at which point the
 * complete frame goes to l2cap_recv_frame().  Malformed sequences
 * reset the reassembly state and mark the connection unreliable.
 * The passed-in @skb is always consumed: either forwarded whole (the
 * unfragmented fast path) or copied and freed.  Always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct sock *sk;
		u16 cid;
		int len;

		/* A new start fragment while reassembly is in progress
		 * means the previous frame can never complete. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Reject frames that would exceed the channel's receive
		 * MTU before bothering to reassemble them.  The lookup
		 * locks the socket, so drop the lock again right away. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
					len, l2cap_pi(sk)->imtu);
			bh_unlock_sock(sk);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (sk)
			bh_unlock_sock(sk);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

	/* Deliberate fall-through: the fragment's data has been copied
	 * (or the frame rejected), so the original skb is freed here on
	 * every path that did not return above. */
drop:
	kfree_skb(skb);
	return 0;
}
3962
/*
 * seq_file show callback for the L2CAP debugfs entry: print one line
 * per L2CAP socket with its source/destination addresses, socket
 * state, PSM, source/destination CIDs, incoming/outgoing MTUs,
 * security level and channel mode.  The socket list lock is held
 * (with bottom halves disabled) for the whole walk.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level,
					pi->mode);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
3986
/* debugfs open: bind the single-record seq_file iterator to our dump */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
3991
/* seq_file plumbing for the "l2cap" debugfs file (under bt_debugfs) */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* dentry of the debugfs file; created in l2cap_init(), removed in
 * l2cap_exit() */
static struct dentry *l2cap_debugfs;
4000
/* Hooks registered with the HCI core: connection lifecycle, security
 * completion and inbound ACL data are delivered through these. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4011
l2cap_init(void)4012 int __init l2cap_init(void)
4013 {
4014 int err;
4015
4016 err = l2cap_init_sockets();
4017 if (err < 0)
4018 return err;
4019
4020 _busy_wq = create_singlethread_workqueue("l2cap");
4021 if (!_busy_wq) {
4022 err = -ENOMEM;
4023 goto error;
4024 }
4025
4026 err = hci_register_proto(&l2cap_hci_proto);
4027 if (err < 0) {
4028 BT_ERR("L2CAP protocol registration failed");
4029 bt_sock_unregister(BTPROTO_L2CAP);
4030 goto error;
4031 }
4032
4033 if (bt_debugfs) {
4034 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4035 bt_debugfs, NULL, &l2cap_debugfs_fops);
4036 if (!l2cap_debugfs)
4037 BT_ERR("Failed to create L2CAP debug file");
4038 }
4039
4040 return 0;
4041
4042 error:
4043 destroy_workqueue(_busy_wq);
4044 l2cap_cleanup_sockets();
4045 return err;
4046 }
4047
/*
 * Tear down the L2CAP layer in reverse order of l2cap_init():
 * debugfs file, busy workqueue, HCI protocol hooks, sockets.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Drain pending work before destroying the queue */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4060
/* Module parameter (mode 0644: world-readable, root-writable via
 * sysfs) toggling ERTM support off. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4063