/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

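/*
 * Initiate an LE connection to conn->dst by sending HCI LE Create Connection.
 * The parameters below are fixed defaults; per the Core spec the scan
 * interval/window are in 0.625 ms units and the connection interval in
 * 1.25 ms units, so this asks for roughly a 2.5 ms scan window, a 10-320 ms
 * connection interval and a 1 s supervision timeout (unit conversion is
 * background from the spec, not something stated in this file).  A pending
 * attempt can be aborted with hci_le_connect_cancel() below.
 */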
static void hci_le_connect(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_create_conn cp;

        conn->state = BT_CONNECT;
        conn->out = 1;
        conn->link_mode |= HCI_LM_MASTER;

        memset(&cp, 0, sizeof(cp));
        cp.scan_interval = cpu_to_le16(0x0004);
        cp.scan_window = cpu_to_le16(0x0004);
        bacpy(&cp.peer_addr, &conn->dst);
        cp.conn_interval_min = cpu_to_le16(0x0008);
        cp.conn_interval_max = cpu_to_le16(0x0100);
        cp.supervision_timeout = cpu_to_le16(0x0064);
        cp.min_ce_len = cpu_to_le16(0x0001);
        cp.max_ce_len = cpu_to_le16(0x0001);

        hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_le_connect_cancel(struct hci_conn *conn)
{
        hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

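/*
 * Initiate an outgoing ACL connection to conn->dst.  If the device is still
 * in the inquiry cache, the cached page scan mode and clock offset are copied
 * into the request (bit 15 of the clock offset is the valid flag) so the
 * remote device can be paged faster.  Role switch is requested whenever the
 * adapter supports it and is not forced to stay master.
 */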
void hci_acl_connect(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct inquiry_entry *ie;
        struct hci_cp_create_conn cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = 1;

        conn->link_mode = HCI_LM_MASTER;

        conn->attempt++;

        conn->link_policy = hdev->link_policy;

        memset(&cp, 0, sizeof(cp));
        bacpy(&cp.bdaddr, &conn->dst);
        cp.pscan_rep_mode = 0x02;

        ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
        if (ie) {
                if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
                        cp.pscan_rep_mode = ie->data.pscan_rep_mode;
                        cp.pscan_mode = ie->data.pscan_mode;
                        cp.clock_offset = ie->data.clock_offset |
                                          cpu_to_le16(0x8000);
                }

                memcpy(conn->dev_class, ie->data.dev_class, 3);
                conn->ssp_mode = ie->data.ssp_mode;
        }

        cp.pkt_type = cpu_to_le16(conn->pkt_type);
        if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
                cp.role_switch = 0x01;
        else
                cp.role_switch = 0x00;

        hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

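/*
 * Abort an outgoing ACL connection attempt.  HCI Create Connection Cancel
 * only exists from HCI version 2 (Bluetooth 1.2) onwards, so the command is
 * skipped on older controllers.
 */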
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
        struct hci_cp_create_conn_cancel cp;

        BT_DBG("%p", conn);

        if (conn->hdev->hci_ver < 2)
                return;

        bacpy(&cp.bdaddr, &conn->dst);
        hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

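/*
 * Request disconnection of an established link with the given HCI reason
 * code and mark the connection as disconnecting.  It simply sends HCI
 * Disconnect for conn->handle, so it is used for any link type that has a
 * valid handle, not just ACL.
 */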
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
        struct hci_cp_disconnect cp;

        BT_DBG("%p", conn);

        conn->state = BT_DISCONN;

        cp.handle = cpu_to_le16(conn->handle);
        cp.reason = reason;
        hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

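/*
 * Set up a legacy SCO link on top of an existing ACL connection (identified
 * by the ACL handle).  Used only when the controller does not support eSCO;
 * see hci_setup_sync() for the eSCO variant.
 */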
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_add_sco cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = 1;

        conn->attempt++;

        cp.handle = cpu_to_le16(handle);
        cp.pkt_type = cpu_to_le16(conn->pkt_type);

        hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

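/*
 * Set up a synchronous (eSCO) link on top of an existing ACL connection.
 * The fixed parameters request 0x1f40 = 8000 bytes/s (64 kbit/s) in each
 * direction and leave latency and retransmission effort unconstrained
 * (0xffff and 0xff are the "don't care" values per the HCI spec; that is
 * background information, not stated in this file).
 */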
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_setup_sync_conn cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = 1;

        conn->attempt++;

        cp.handle = cpu_to_le16(handle);
        cp.pkt_type = cpu_to_le16(conn->pkt_type);

        cp.tx_bandwidth = cpu_to_le32(0x00001f40);
        cp.rx_bandwidth = cpu_to_le32(0x00001f40);
        cp.max_latency = cpu_to_le16(0xffff);
        cp.voice_setting = cpu_to_le16(hdev->voice_setting);
        cp.retrans_effort = 0xff;

        hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

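/*
 * Ask the controller to renegotiate the parameters of an established LE
 * connection.  min/max are the connection interval bounds, latency the slave
 * latency and to_multiplier the supervision timeout, all passed through
 * unchanged in controller units (1.25 ms and 10 ms respectively per the Core
 * spec; noted here as background, not derived from this file).
 */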
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
                        u16 latency, u16 to_multiplier)
{
        struct hci_cp_le_conn_update cp;
        struct hci_dev *hdev = conn->hdev;

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);
        cp.conn_interval_min = cpu_to_le16(min);
        cp.conn_interval_max = cpu_to_le16(max);
        cp.conn_latency = cpu_to_le16(latency);
        cp.supervision_timeout = cpu_to_le16(to_multiplier);
        cp.min_ce_len = cpu_to_le16(0x0001);
        cp.max_ce_len = cpu_to_le16(0x0001);

        hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_conn_update);

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
        struct hci_conn *sco = conn->link;

        BT_DBG("%p", conn);

        if (!sco)
                return;

        if (!status) {
                if (lmp_esco_capable(conn->hdev))
                        hci_setup_sync(sco, conn->handle);
                else
                        hci_add_sco(sco, conn->handle);
        } else {
                hci_proto_connect_cfm(sco, status);
                hci_conn_del(sco);
        }
}

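/*
 * Disconnect timer callback (the timer is armed elsewhere, typically when
 * the last reference to the connection is dropped).  If nothing has taken a
 * new reference in the meantime, a pending connect attempt is cancelled or,
 * for an established link, a disconnect is issued with the reason supplied
 * by the protocol layer.
 */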
static void hci_conn_timeout(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;
        struct hci_dev *hdev = conn->hdev;
        __u8 reason;

        BT_DBG("conn %p state %d", conn, conn->state);

        if (atomic_read(&conn->refcnt))
                return;

        hci_dev_lock(hdev);

        switch (conn->state) {
        case BT_CONNECT:
        case BT_CONNECT2:
                if (conn->out) {
                        if (conn->type == ACL_LINK)
                                hci_acl_connect_cancel(conn);
                        else if (conn->type == LE_LINK)
                                hci_le_connect_cancel(conn);
                }
                break;
        case BT_CONFIG:
        case BT_CONNECTED:
                reason = hci_proto_disconn_ind(conn);
                hci_acl_disconn(conn, reason);
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }

        hci_dev_unlock(hdev);
}

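/* Idle timer callback: the link has been idle for hdev->idle_timeout ms,
 * so try to put it into sniff mode to save power. */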
static void hci_conn_idle(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        hci_conn_enter_sniff_mode(conn);
}

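/*
 * Allocate a new hci_conn for the given link type and destination address,
 * initialise its default packet types from the adapter capabilities, set up
 * the disconnect and idle timers and add it to the connection hash.  The
 * returned connection starts with a zero reference count; callers use
 * hci_conn_hold()/hci_conn_put() to manage its lifetime.
 */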
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
        struct hci_conn *conn;

        BT_DBG("%s dst %s", hdev->name, batostr(dst));

        conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
        if (!conn)
                return NULL;

        bacpy(&conn->dst, dst);
        conn->hdev = hdev;
        conn->type = type;
        conn->mode = HCI_CM_ACTIVE;
        conn->state = BT_OPEN;
        conn->auth_type = HCI_AT_GENERAL_BONDING;
        conn->io_capability = hdev->io_capability;
        conn->remote_auth = 0xff;

        conn->power_save = 1;
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;

        switch (type) {
        case ACL_LINK:
                conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
                break;
        case SCO_LINK:
                if (lmp_esco_capable(hdev))
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
                                         (hdev->esco_type & EDR_ESCO_MASK);
                else
                        conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
                break;
        case ESCO_LINK:
                conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
                break;
        }

        skb_queue_head_init(&conn->data_q);

        setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
        setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);

        atomic_set(&conn->refcnt, 0);

        hci_dev_hold(hdev);

        tasklet_disable(&hdev->tx_task);

        hci_conn_hash_add(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

        atomic_set(&conn->devref, 0);

        hci_conn_init_sysfs(conn);

        tasklet_enable(&hdev->tx_task);

        return conn;
}

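/*
 * Tear down a connection object: stop its timers, detach any linked SCO/ACL
 * peer, return unacknowledged packet credits to the adapter, remove it from
 * the connection hash, release its sysfs device reference and drop the
 * hci_dev reference taken in hci_conn_add().
 */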
int hci_conn_del(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

        del_timer(&conn->idle_timer);

        del_timer(&conn->disc_timer);

        if (conn->type == ACL_LINK) {
                struct hci_conn *sco = conn->link;
                if (sco)
                        sco->link = NULL;

                /* Unacked frames */
                hdev->acl_cnt += conn->sent;
        } else if (conn->type == LE_LINK) {
                if (hdev->le_pkts)
                        hdev->le_cnt += conn->sent;
                else
                        hdev->acl_cnt += conn->sent;
        } else {
                struct hci_conn *acl = conn->link;
                if (acl) {
                        acl->link = NULL;
                        hci_conn_put(acl);
                }
        }

        tasklet_disable(&hdev->tx_task);

        hci_conn_hash_del(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

        tasklet_enable(&hdev->tx_task);

        skb_queue_purge(&conn->data_q);

        hci_conn_put_device(conn);

        hci_dev_put(hdev);

        return 0;
}

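/*
 * Pick the local adapter to use for a connection to dst.  If a specific
 * source address is given, the adapter with that address is chosen;
 * otherwise the first adapter that is up (and whose own address differs
 * from dst) is used.  A reference to the returned hci_dev is held for the
 * caller.
 */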
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
        int use_src = bacmp(src, BDADDR_ANY);
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%s -> %s", batostr(src), batostr(dst));

        read_lock_bh(&hci_dev_list_lock);

        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);

                if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
                        continue;

                /* Simple routing:
                 *   No source address - find interface with bdaddr != dst
                 *   Source address    - find interface with bdaddr == src
                 */

                if (use_src) {
                        if (!bacmp(&d->bdaddr, src)) {
                                hdev = d; break;
                        }
                } else {
                        if (bacmp(&d->bdaddr, dst)) {
                                hdev = d; break;
                        }
                }
        }

        if (hdev)
                hdev = hci_dev_hold(hdev);

        read_unlock_bh(&hci_dev_list_lock);
        return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO, ACL or LE connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
                             __u8 sec_level, __u8 auth_type)
{
        struct hci_conn *acl;
        struct hci_conn *sco;
        struct hci_conn *le;

        BT_DBG("%s dst %s", hdev->name, batostr(dst));

        if (type == LE_LINK) {
                le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
                if (le)
                        return ERR_PTR(-EBUSY);
                le = hci_conn_add(hdev, LE_LINK, dst);
                if (!le)
                        return ERR_PTR(-ENOMEM);
                if (le->state == BT_OPEN)
                        hci_le_connect(le);

                hci_conn_hold(le);

                return le;
        }

        acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
        if (!acl) {
                acl = hci_conn_add(hdev, ACL_LINK, dst);
                if (!acl)
                        return NULL;
        }

        hci_conn_hold(acl);

        if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
                acl->sec_level = BT_SECURITY_LOW;
                acl->pending_sec_level = sec_level;
                acl->auth_type = auth_type;
                hci_acl_connect(acl);
        }

        if (type == ACL_LINK)
                return acl;

        sco = hci_conn_hash_lookup_ba(hdev, type, dst);
        if (!sco) {
                sco = hci_conn_add(hdev, type, dst);
                if (!sco) {
                        hci_conn_put(acl);
                        return NULL;
                }
        }

        acl->link = sco;
        sco->link = acl;

        hci_conn_hold(sco);

        if (acl->state == BT_CONNECTED &&
                        (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
                acl->power_save = 1;
                hci_conn_enter_active_mode(acl);

                if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
                        /* defer SCO setup until mode change completed */
                        set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
                        return sco;
                }

                hci_sco_setup(acl, 0x00);
        }

        return sco;
}
EXPORT_SYMBOL(hci_connect);
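
/*
 * Illustrative caller sketch (not taken from this file; the real users live
 * in the protocol layers such as L2CAP and SCO).  Note the asymmetric error
 * convention visible above: the LE path returns ERR_PTR() values while the
 * ACL/SCO paths return NULL on failure.
 *
 *      hci_dev_lock_bh(hdev);
 *      conn = hci_connect(hdev, ACL_LINK, &dst, BT_SECURITY_LOW,
 *                         HCI_AT_GENERAL_BONDING);
 *      hci_dev_unlock_bh(hdev);
 *      if (conn) {
 *              ... use the link, then drop the reference ...
 *              hci_conn_put(conn);
 *      }
 */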

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
                                        !(conn->link_mode & HCI_LM_ENCRYPT))
                return 0;

        return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
        BT_DBG("conn %p", conn);

        if (conn->pending_sec_level > sec_level)
                sec_level = conn->pending_sec_level;

        if (sec_level > conn->sec_level)
                conn->pending_sec_level = sec_level;
        else if (conn->link_mode & HCI_LM_AUTH)
                return 1;

        /* Make sure we preserve an existing MITM requirement */
        auth_type |= (conn->auth_type & 0x01);

        conn->auth_type = auth_type;

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
                struct hci_cp_auth_requested cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
                                                        sizeof(cp), &cp);
        }

        return 0;
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
        BT_DBG("conn %p", conn);

        if (sec_level == BT_SECURITY_SDP)
                return 1;

        if (sec_level == BT_SECURITY_LOW &&
                                (!conn->ssp_mode || !conn->hdev->ssp_mode))
                return 1;

        if (conn->link_mode & HCI_LM_ENCRYPT)
                return hci_conn_auth(conn, sec_level, auth_type);

        if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return 0;

        if (hci_conn_auth(conn, sec_level, auth_type)) {
                struct hci_cp_set_conn_encrypt cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.encrypt = 1;
                hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
                                                        sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
                struct hci_cp_change_conn_link_key cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
                                                        sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
        BT_DBG("conn %p", conn);

        if (!role && conn->link_mode & HCI_LM_MASTER)
                return 1;

        if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
                struct hci_cp_switch_role cp;
                bacpy(&cp.bdaddr, &conn->dst);
                cp.role = role;
                hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
                goto timer;

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
                struct hci_cp_exit_sniff_mode cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
        }

timer:
        if (hdev->idle_timeout > 0)
                mod_timer(&conn->idle_timer,
                          jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
                return;

        if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
                return;

        if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
                struct hci_cp_sniff_subrate cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.max_latency = cpu_to_le16(0);
                cp.min_remote_timeout = cpu_to_le16(0);
                cp.min_local_timeout = cpu_to_le16(0);
                hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
        }

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
                struct hci_cp_sniff_mode cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
                cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
                cp.attempt = cpu_to_le16(4);
                cp.timeout = cpu_to_le16(1);
                hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
        }
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;

        BT_DBG("hdev %s", hdev->name);

        p = h->list.next;
        while (p != &h->list) {
                struct hci_conn *c;

                c = list_entry(p, struct hci_conn, list);
                p = p->next;

                c->state = BT_CLOSED;

                hci_proto_disconn_cfm(c, 0x16);
                hci_conn_del(c);
        }
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
        struct hci_conn *conn;

        BT_DBG("hdev %s", hdev->name);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
        if (conn)
                hci_acl_connect(conn);

        hci_dev_unlock(hdev);
}

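/*
 * devref tracks users of the connection's sysfs device.
 * hci_conn_hold_device() takes such a reference; hci_conn_put_device()
 * drops it and removes the sysfs entry once the last one is gone.
 */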
void hci_conn_hold_device(struct hci_conn *conn)
{
        atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
        if (atomic_dec_and_test(&conn->devref))
                hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

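/*
 * Helper for the connection-list ioctl (HCIGETCONNLIST in the HCI socket
 * code): copies information about up to req.conn_num active connections on
 * the requested adapter back to user space.  conn_num is capped so the
 * temporary kernel buffer stays at roughly two pages of conn_info entries.
 */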
int hci_get_conn_list(void __user *arg)
{
        struct hci_conn_list_req req, *cl;
        struct hci_conn_info *ci;
        struct hci_dev *hdev;
        struct list_head *p;
        int n = 0, size, err;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
                return -EINVAL;

        size = sizeof(req) + req.conn_num * sizeof(*ci);

        cl = kmalloc(size, GFP_KERNEL);
        if (!cl)
                return -ENOMEM;

        hdev = hci_dev_get(req.dev_id);
        if (!hdev) {
                kfree(cl);
                return -ENODEV;
        }

        ci = cl->conn_info;

        hci_dev_lock_bh(hdev);
        list_for_each(p, &hdev->conn_hash.list) {
                register struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                bacpy(&(ci + n)->bdaddr, &c->dst);
                (ci + n)->handle = c->handle;
                (ci + n)->type = c->type;
                (ci + n)->out = c->out;
                (ci + n)->state = c->state;
                (ci + n)->link_mode = c->link_mode;
                if (++n >= req.conn_num)
                        break;
        }
        hci_dev_unlock_bh(hdev);

        cl->dev_id = hdev->id;
        cl->conn_num = n;
        size = sizeof(req) + n * sizeof(*ci);

        hci_dev_put(hdev);

        err = copy_to_user(arg, cl, size);
        kfree(cl);

        return err ? -EFAULT : 0;
}

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
        struct hci_conn_info_req req;
        struct hci_conn_info ci;
        struct hci_conn *conn;
        char __user *ptr = arg + sizeof(req);

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        hci_dev_lock_bh(hdev);
        conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
        if (conn) {
                bacpy(&ci.bdaddr, &conn->dst);
                ci.handle = conn->handle;
                ci.type = conn->type;
                ci.out = conn->out;
                ci.state = conn->state;
                ci.link_mode = conn->link_mode;
        }
        hci_dev_unlock_bh(hdev);

        if (!conn)
                return -ENOENT;

        return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
        struct hci_auth_info_req req;
        struct hci_conn *conn;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        hci_dev_lock_bh(hdev);
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
        if (conn)
                req.type = conn->auth_type;
        hci_dev_unlock_bh(hdev);

        if (!conn)
                return -ENOENT;

        return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}