1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/property.h>
33 #include <linux/suspend.h>
34 #include <linux/wait.h>
35 #include <asm/unaligned.h>
36
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 #include <net/bluetooth/mgmt.h>
41
42 #include "hci_request.h"
43 #include "hci_debugfs.h"
44 #include "smp.h"
45 #include "leds.h"
46 #include "msft.h"
47 #include "aosp.h"
48 #include "hci_codec.h"
49
50 static void hci_rx_work(struct work_struct *work);
51 static void hci_cmd_work(struct work_struct *work);
52 static void hci_tx_work(struct work_struct *work);
53
54 /* HCI device list */
55 LIST_HEAD(hci_dev_list);
56 DEFINE_RWLOCK(hci_dev_list_lock);
57
58 /* HCI callback list */
59 LIST_HEAD(hci_cb_list);
60 DEFINE_MUTEX(hci_cb_list_lock);
61
62 /* HCI ID Numbering */
63 static DEFINE_IDA(hci_index_ida);
64
65 static int hci_scan_req(struct hci_request *req, unsigned long opt)
66 {
67 __u8 scan = opt;
68
69 BT_DBG("%s %x", req->hdev->name, scan);
70
71 /* Inquiry and Page scans */
72 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
73 return 0;
74 }
75
76 static int hci_auth_req(struct hci_request *req, unsigned long opt)
77 {
78 __u8 auth = opt;
79
80 BT_DBG("%s %x", req->hdev->name, auth);
81
82 /* Authentication */
83 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
84 return 0;
85 }
86
87 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
88 {
89 __u8 encrypt = opt;
90
91 BT_DBG("%s %x", req->hdev->name, encrypt);
92
93 /* Encryption */
94 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
95 return 0;
96 }
97
98 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
99 {
100 __le16 policy = cpu_to_le16(opt);
101
102 BT_DBG("%s %x", req->hdev->name, policy);
103
104 /* Default link policy */
105 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
106 return 0;
107 }
108
109 /* Get HCI device by index.
110 * Device is held on return. */
111 struct hci_dev *hci_dev_get(int index)
112 {
113 struct hci_dev *hdev = NULL, *d;
114
115 BT_DBG("%d", index);
116
117 if (index < 0)
118 return NULL;
119
120 read_lock(&hci_dev_list_lock);
121 list_for_each_entry(d, &hci_dev_list, list) {
122 if (d->id == index) {
123 hdev = hci_dev_hold(d);
124 break;
125 }
126 }
127 read_unlock(&hci_dev_list_lock);
128 return hdev;
129 }
130
131 /* ---- Inquiry support ---- */
132
133 bool hci_discovery_active(struct hci_dev *hdev)
134 {
135 struct discovery_state *discov = &hdev->discovery;
136
137 switch (discov->state) {
138 case DISCOVERY_FINDING:
139 case DISCOVERY_RESOLVING:
140 return true;
141
142 default:
143 return false;
144 }
145 }
146
147 void hci_discovery_set_state(struct hci_dev *hdev, int state)
148 {
149 int old_state = hdev->discovery.state;
150
151 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
152
153 if (old_state == state)
154 return;
155
156 hdev->discovery.state = state;
157
158 switch (state) {
159 case DISCOVERY_STOPPED:
160 hci_update_passive_scan(hdev);
161
162 if (old_state != DISCOVERY_STARTING)
163 mgmt_discovering(hdev, 0);
164 break;
165 case DISCOVERY_STARTING:
166 break;
167 case DISCOVERY_FINDING:
168 mgmt_discovering(hdev, 1);
169 break;
170 case DISCOVERY_RESOLVING:
171 break;
172 case DISCOVERY_STOPPING:
173 break;
174 }
175 }
176
177 void hci_inquiry_cache_flush(struct hci_dev *hdev)
178 {
179 struct discovery_state *cache = &hdev->discovery;
180 struct inquiry_entry *p, *n;
181
182 list_for_each_entry_safe(p, n, &cache->all, all) {
183 list_del(&p->all);
184 kfree(p);
185 }
186
187 INIT_LIST_HEAD(&cache->unknown);
188 INIT_LIST_HEAD(&cache->resolve);
189 }
190
191 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
192 bdaddr_t *bdaddr)
193 {
194 struct discovery_state *cache = &hdev->discovery;
195 struct inquiry_entry *e;
196
197 BT_DBG("cache %p, %pMR", cache, bdaddr);
198
199 list_for_each_entry(e, &cache->all, all) {
200 if (!bacmp(&e->data.bdaddr, bdaddr))
201 return e;
202 }
203
204 return NULL;
205 }
206
207 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
208 bdaddr_t *bdaddr)
209 {
210 struct discovery_state *cache = &hdev->discovery;
211 struct inquiry_entry *e;
212
213 BT_DBG("cache %p, %pMR", cache, bdaddr);
214
215 list_for_each_entry(e, &cache->unknown, list) {
216 if (!bacmp(&e->data.bdaddr, bdaddr))
217 return e;
218 }
219
220 return NULL;
221 }
222
223 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
224 bdaddr_t *bdaddr,
225 int state)
226 {
227 struct discovery_state *cache = &hdev->discovery;
228 struct inquiry_entry *e;
229
230 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
231
232 list_for_each_entry(e, &cache->resolve, list) {
233 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
234 return e;
235 if (!bacmp(&e->data.bdaddr, bdaddr))
236 return e;
237 }
238
239 return NULL;
240 }
241
242 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
243 struct inquiry_entry *ie)
244 {
245 struct discovery_state *cache = &hdev->discovery;
246 struct list_head *pos = &cache->resolve;
247 struct inquiry_entry *p;
248
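/* Re-insert the entry keeping the resolve list ordered by signal
 * strength (smallest |rssi|, i.e. strongest signal, first), while
 * leaving entries whose name resolution is already pending ahead of it.
 */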
249 list_del(&ie->list);
250
251 list_for_each_entry(p, &cache->resolve, list) {
252 if (p->name_state != NAME_PENDING &&
253 abs(p->data.rssi) >= abs(ie->data.rssi))
254 break;
255 pos = &p->list;
256 }
257
258 list_add(&ie->list, pos);
259 }
260
261 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
262 bool name_known)
263 {
264 struct discovery_state *cache = &hdev->discovery;
265 struct inquiry_entry *ie;
266 u32 flags = 0;
267
268 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
269
270 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
271
272 if (!data->ssp_mode)
273 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
274
275 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
276 if (ie) {
277 if (!ie->data.ssp_mode)
278 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
279
280 if (ie->name_state == NAME_NEEDED &&
281 data->rssi != ie->data.rssi) {
282 ie->data.rssi = data->rssi;
283 hci_inquiry_cache_update_resolve(hdev, ie);
284 }
285
286 goto update;
287 }
288
289 /* Entry not in the cache. Add new one. */
290 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
291 if (!ie) {
292 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
293 goto done;
294 }
295
296 list_add(&ie->all, &cache->all);
297
298 if (name_known) {
299 ie->name_state = NAME_KNOWN;
300 } else {
301 ie->name_state = NAME_NOT_KNOWN;
302 list_add(&ie->list, &cache->unknown);
303 }
304
305 update:
306 if (name_known && ie->name_state != NAME_KNOWN &&
307 ie->name_state != NAME_PENDING) {
308 ie->name_state = NAME_KNOWN;
309 list_del(&ie->list);
310 }
311
312 memcpy(&ie->data, data, sizeof(*data));
313 ie->timestamp = jiffies;
314 cache->timestamp = jiffies;
315
316 if (ie->name_state == NAME_NOT_KNOWN)
317 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
318
319 done:
320 return flags;
321 }
322
323 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
324 {
325 struct discovery_state *cache = &hdev->discovery;
326 struct inquiry_info *info = (struct inquiry_info *) buf;
327 struct inquiry_entry *e;
328 int copied = 0;
329
330 list_for_each_entry(e, &cache->all, all) {
331 struct inquiry_data *data = &e->data;
332
333 if (copied >= num)
334 break;
335
336 bacpy(&info->bdaddr, &data->bdaddr);
337 info->pscan_rep_mode = data->pscan_rep_mode;
338 info->pscan_period_mode = data->pscan_period_mode;
339 info->pscan_mode = data->pscan_mode;
340 memcpy(info->dev_class, data->dev_class, 3);
341 info->clock_offset = data->clock_offset;
342
343 info++;
344 copied++;
345 }
346
347 BT_DBG("cache %p, copied %d", cache, copied);
348 return copied;
349 }
350
351 static int hci_inq_req(struct hci_request *req, unsigned long opt)
352 {
353 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
354 struct hci_dev *hdev = req->hdev;
355 struct hci_cp_inquiry cp;
356
357 BT_DBG("%s", hdev->name);
358
359 if (test_bit(HCI_INQUIRY, &hdev->flags))
360 return 0;
361
362 /* Start Inquiry */
363 memcpy(&cp.lap, &ir->lap, 3);
364 cp.length = ir->length;
365 cp.num_rsp = ir->num_rsp;
366 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
367
368 return 0;
369 }
370
371 int hci_inquiry(void __user *arg)
372 {
373 __u8 __user *ptr = arg;
374 struct hci_inquiry_req ir;
375 struct hci_dev *hdev;
376 int err = 0, do_inquiry = 0, max_rsp;
377 long timeo;
378 __u8 *buf;
379
380 if (copy_from_user(&ir, ptr, sizeof(ir)))
381 return -EFAULT;
382
383 hdev = hci_dev_get(ir.dev_id);
384 if (!hdev)
385 return -ENODEV;
386
387 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
388 err = -EBUSY;
389 goto done;
390 }
391
392 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
393 err = -EOPNOTSUPP;
394 goto done;
395 }
396
397 if (hdev->dev_type != HCI_PRIMARY) {
398 err = -EOPNOTSUPP;
399 goto done;
400 }
401
402 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
403 err = -EOPNOTSUPP;
404 goto done;
405 }
406
407 /* Restrict maximum inquiry length to 60 seconds */
408 if (ir.length > 60) {
409 err = -EINVAL;
410 goto done;
411 }
412
413 hci_dev_lock(hdev);
414 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
415 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
416 hci_inquiry_cache_flush(hdev);
417 do_inquiry = 1;
418 }
419 hci_dev_unlock(hdev);
420
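/* ir.length is expressed in 1.28 second units per the HCI
 * specification; allow 2 seconds per unit so the whole request has
 * some headroom to complete.
 */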
421 timeo = ir.length * msecs_to_jiffies(2000);
422
423 if (do_inquiry) {
424 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
425 timeo, NULL);
426 if (err < 0)
427 goto done;
428
429 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
430 * cleared). If it is interrupted by a signal, return -EINTR.
431 */
432 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
433 TASK_INTERRUPTIBLE)) {
434 err = -EINTR;
435 goto done;
436 }
437 }
438
439 /* For an unlimited number of responses, use a buffer with
440 * 255 entries
441 */
442 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
443
444 /* cache_dump can't sleep. Therefore, allocate a temporary buffer and
445 * then copy it to user space.
446 */
447 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
448 if (!buf) {
449 err = -ENOMEM;
450 goto done;
451 }
452
453 hci_dev_lock(hdev);
454 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
455 hci_dev_unlock(hdev);
456
457 BT_DBG("num_rsp %d", ir.num_rsp);
458
459 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
460 ptr += sizeof(ir);
461 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
462 ir.num_rsp))
463 err = -EFAULT;
464 } else
465 err = -EFAULT;
466
467 kfree(buf);
468
469 done:
470 hci_dev_put(hdev);
471 return err;
472 }
473
474 static int hci_dev_do_open(struct hci_dev *hdev)
475 {
476 int ret = 0;
477
478 BT_DBG("%s %p", hdev->name, hdev);
479
480 hci_req_sync_lock(hdev);
481
482 ret = hci_dev_open_sync(hdev);
483
484 hci_req_sync_unlock(hdev);
485 return ret;
486 }
487
488 /* ---- HCI ioctl helpers ---- */
489
490 int hci_dev_open(__u16 dev)
491 {
492 struct hci_dev *hdev;
493 int err;
494
495 hdev = hci_dev_get(dev);
496 if (!hdev)
497 return -ENODEV;
498
499 /* Devices that are marked as unconfigured can only be powered
500 * up as user channel. Trying to bring them up as normal devices
501 * will result in a failure. Only user channel operation is
502 * possible.
503 *
504 * When this function is called for a user channel, the flag
505 * HCI_USER_CHANNEL will be set first before attempting to
506 * open the device.
507 */
508 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
509 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
510 err = -EOPNOTSUPP;
511 goto done;
512 }
513
514 /* We need to ensure that no other power on/off work is pending
515 * before proceeding to call hci_dev_do_open. This is
516 * particularly important if the setup procedure has not yet
517 * completed.
518 */
519 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
520 cancel_delayed_work(&hdev->power_off);
521
522 /* After this call it is guaranteed that the setup procedure
523 * has finished. This means that error conditions like RFKILL
524 * or no valid public or static random address apply.
525 */
526 flush_workqueue(hdev->req_workqueue);
527
528 /* For controllers not using the management interface and that
529 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
530 * so that pairing works for them. Once the management interface
531 * is in use this bit will be cleared again and userspace has
532 * to explicitly enable it.
533 */
534 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
535 !hci_dev_test_flag(hdev, HCI_MGMT))
536 hci_dev_set_flag(hdev, HCI_BONDABLE);
537
538 err = hci_dev_do_open(hdev);
539
540 done:
541 hci_dev_put(hdev);
542 return err;
543 }
544
545 int hci_dev_do_close(struct hci_dev *hdev)
546 {
547 int err;
548
549 BT_DBG("%s %p", hdev->name, hdev);
550
551 hci_req_sync_lock(hdev);
552
553 err = hci_dev_close_sync(hdev);
554
555 hci_req_sync_unlock(hdev);
556
557 return err;
558 }
559
560 int hci_dev_close(__u16 dev)
561 {
562 struct hci_dev *hdev;
563 int err;
564
565 hdev = hci_dev_get(dev);
566 if (!hdev)
567 return -ENODEV;
568
569 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
570 err = -EBUSY;
571 goto done;
572 }
573
574 cancel_work_sync(&hdev->power_on);
575 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
576 cancel_delayed_work(&hdev->power_off);
577
578 err = hci_dev_do_close(hdev);
579
580 done:
581 hci_dev_put(hdev);
582 return err;
583 }
584
585 static int hci_dev_do_reset(struct hci_dev *hdev)
586 {
587 int ret;
588
589 BT_DBG("%s %p", hdev->name, hdev);
590
591 hci_req_sync_lock(hdev);
592
593 /* Drop queues */
594 skb_queue_purge(&hdev->rx_q);
595 skb_queue_purge(&hdev->cmd_q);
596
597 /* Cancel these to avoid queueing non-chained pending work */
598 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
599 cancel_delayed_work(&hdev->cmd_timer);
600 cancel_delayed_work(&hdev->ncmd_timer);
601
602 /* Avoid potential lockdep warnings from the *_flush() calls by
603 * ensuring the workqueue is empty up front.
604 */
605 drain_workqueue(hdev->workqueue);
606
607 hci_dev_lock(hdev);
608 hci_inquiry_cache_flush(hdev);
609 hci_conn_hash_flush(hdev);
610 hci_dev_unlock(hdev);
611
612 if (hdev->flush)
613 hdev->flush(hdev);
614
615 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
616
617 atomic_set(&hdev->cmd_cnt, 1);
618 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
619
620 ret = hci_reset_sync(hdev);
621
622 hci_req_sync_unlock(hdev);
623 return ret;
624 }
625
626 int hci_dev_reset(__u16 dev)
627 {
628 struct hci_dev *hdev;
629 int err;
630
631 hdev = hci_dev_get(dev);
632 if (!hdev)
633 return -ENODEV;
634
635 if (!test_bit(HCI_UP, &hdev->flags)) {
636 err = -ENETDOWN;
637 goto done;
638 }
639
640 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
641 err = -EBUSY;
642 goto done;
643 }
644
645 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
646 err = -EOPNOTSUPP;
647 goto done;
648 }
649
650 err = hci_dev_do_reset(hdev);
651
652 done:
653 hci_dev_put(hdev);
654 return err;
655 }
656
657 int hci_dev_reset_stat(__u16 dev)
658 {
659 struct hci_dev *hdev;
660 int ret = 0;
661
662 hdev = hci_dev_get(dev);
663 if (!hdev)
664 return -ENODEV;
665
666 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
667 ret = -EBUSY;
668 goto done;
669 }
670
671 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
672 ret = -EOPNOTSUPP;
673 goto done;
674 }
675
676 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
677
678 done:
679 hci_dev_put(hdev);
680 return ret;
681 }
682
683 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
684 {
685 bool conn_changed, discov_changed;
686
687 BT_DBG("%s scan 0x%02x", hdev->name, scan);
688
689 if ((scan & SCAN_PAGE))
690 conn_changed = !hci_dev_test_and_set_flag(hdev,
691 HCI_CONNECTABLE);
692 else
693 conn_changed = hci_dev_test_and_clear_flag(hdev,
694 HCI_CONNECTABLE);
695
696 if ((scan & SCAN_INQUIRY)) {
697 discov_changed = !hci_dev_test_and_set_flag(hdev,
698 HCI_DISCOVERABLE);
699 } else {
700 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
701 discov_changed = hci_dev_test_and_clear_flag(hdev,
702 HCI_DISCOVERABLE);
703 }
704
705 if (!hci_dev_test_flag(hdev, HCI_MGMT))
706 return;
707
708 if (conn_changed || discov_changed) {
709 /* In case this was disabled through mgmt */
710 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
711
712 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
713 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
714
715 mgmt_new_settings(hdev);
716 }
717 }
718
719 int hci_dev_cmd(unsigned int cmd, void __user *arg)
720 {
721 struct hci_dev *hdev;
722 struct hci_dev_req dr;
723 int err = 0;
724
725 if (copy_from_user(&dr, arg, sizeof(dr)))
726 return -EFAULT;
727
728 hdev = hci_dev_get(dr.dev_id);
729 if (!hdev)
730 return -ENODEV;
731
732 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
733 err = -EBUSY;
734 goto done;
735 }
736
737 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
738 err = -EOPNOTSUPP;
739 goto done;
740 }
741
742 if (hdev->dev_type != HCI_PRIMARY) {
743 err = -EOPNOTSUPP;
744 goto done;
745 }
746
747 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
748 err = -EOPNOTSUPP;
749 goto done;
750 }
751
752 switch (cmd) {
753 case HCISETAUTH:
754 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
755 HCI_INIT_TIMEOUT, NULL);
756 break;
757
758 case HCISETENCRYPT:
759 if (!lmp_encrypt_capable(hdev)) {
760 err = -EOPNOTSUPP;
761 break;
762 }
763
764 if (!test_bit(HCI_AUTH, &hdev->flags)) {
765 /* Auth must be enabled first */
766 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
767 HCI_INIT_TIMEOUT, NULL);
768 if (err)
769 break;
770 }
771
772 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
773 HCI_INIT_TIMEOUT, NULL);
774 break;
775
776 case HCISETSCAN:
777 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
778 HCI_INIT_TIMEOUT, NULL);
779
780 /* Ensure that the connectable and discoverable states
781 * get correctly modified as this was a non-mgmt change.
782 */
783 if (!err)
784 hci_update_passive_scan_state(hdev, dr.dev_opt);
785 break;
786
787 case HCISETLINKPOL:
788 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
789 HCI_INIT_TIMEOUT, NULL);
790 break;
791
792 case HCISETLINKMODE:
793 hdev->link_mode = ((__u16) dr.dev_opt) &
794 (HCI_LM_MASTER | HCI_LM_ACCEPT);
795 break;
796
797 case HCISETPTYPE:
798 if (hdev->pkt_type == (__u16) dr.dev_opt)
799 break;
800
801 hdev->pkt_type = (__u16) dr.dev_opt;
802 mgmt_phy_configuration_changed(hdev, NULL);
803 break;
804
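/* For the MTU ioctls below, dev_opt carries two 16-bit values: the
 * first half holds the packet count and the second half the MTU.
 */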
805 case HCISETACLMTU:
806 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
807 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
808 break;
809
810 case HCISETSCOMTU:
811 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
812 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
813 break;
814
815 default:
816 err = -EINVAL;
817 break;
818 }
819
820 done:
821 hci_dev_put(hdev);
822 return err;
823 }
824
825 int hci_get_dev_list(void __user *arg)
826 {
827 struct hci_dev *hdev;
828 struct hci_dev_list_req *dl;
829 struct hci_dev_req *dr;
830 int n = 0, size, err;
831 __u16 dev_num;
832
833 if (get_user(dev_num, (__u16 __user *) arg))
834 return -EFAULT;
835
836 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
837 return -EINVAL;
838
839 size = sizeof(*dl) + dev_num * sizeof(*dr);
840
841 dl = kzalloc(size, GFP_KERNEL);
842 if (!dl)
843 return -ENOMEM;
844
845 dr = dl->dev_req;
846
847 read_lock(&hci_dev_list_lock);
848 list_for_each_entry(hdev, &hci_dev_list, list) {
849 unsigned long flags = hdev->flags;
850
851 /* When the auto-off is configured it means the transport
852 * is running, but in that case still indicate that the
853 * device is actually down.
854 */
855 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
856 flags &= ~BIT(HCI_UP);
857
858 (dr + n)->dev_id = hdev->id;
859 (dr + n)->dev_opt = flags;
860
861 if (++n >= dev_num)
862 break;
863 }
864 read_unlock(&hci_dev_list_lock);
865
866 dl->dev_num = n;
867 size = sizeof(*dl) + n * sizeof(*dr);
868
869 err = copy_to_user(arg, dl, size);
870 kfree(dl);
871
872 return err ? -EFAULT : 0;
873 }
874
875 int hci_get_dev_info(void __user *arg)
876 {
877 struct hci_dev *hdev;
878 struct hci_dev_info di;
879 unsigned long flags;
880 int err = 0;
881
882 if (copy_from_user(&di, arg, sizeof(di)))
883 return -EFAULT;
884
885 hdev = hci_dev_get(di.dev_id);
886 if (!hdev)
887 return -ENODEV;
888
889 /* When the auto-off is configured it means the transport
890 * is running, but in that case still indicate that the
891 * device is actually down.
892 */
893 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
894 flags = hdev->flags & ~BIT(HCI_UP);
895 else
896 flags = hdev->flags;
897
898 strcpy(di.name, hdev->name);
899 di.bdaddr = hdev->bdaddr;
900 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
901 di.flags = flags;
902 di.pkt_type = hdev->pkt_type;
903 if (lmp_bredr_capable(hdev)) {
904 di.acl_mtu = hdev->acl_mtu;
905 di.acl_pkts = hdev->acl_pkts;
906 di.sco_mtu = hdev->sco_mtu;
907 di.sco_pkts = hdev->sco_pkts;
908 } else {
909 di.acl_mtu = hdev->le_mtu;
910 di.acl_pkts = hdev->le_pkts;
911 di.sco_mtu = 0;
912 di.sco_pkts = 0;
913 }
914 di.link_policy = hdev->link_policy;
915 di.link_mode = hdev->link_mode;
916
917 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
918 memcpy(&di.features, &hdev->features, sizeof(di.features));
919
920 if (copy_to_user(arg, &di, sizeof(di)))
921 err = -EFAULT;
922
923 hci_dev_put(hdev);
924
925 return err;
926 }
927
928 /* ---- Interface to HCI drivers ---- */
929
930 static int hci_rfkill_set_block(void *data, bool blocked)
931 {
932 struct hci_dev *hdev = data;
933
934 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
935
936 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
937 return -EBUSY;
938
939 if (blocked) {
940 hci_dev_set_flag(hdev, HCI_RFKILLED);
941 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
942 !hci_dev_test_flag(hdev, HCI_CONFIG))
943 hci_dev_do_close(hdev);
944 } else {
945 hci_dev_clear_flag(hdev, HCI_RFKILLED);
946 }
947
948 return 0;
949 }
950
951 static const struct rfkill_ops hci_rfkill_ops = {
952 .set_block = hci_rfkill_set_block,
953 };
954
955 static void hci_power_on(struct work_struct *work)
956 {
957 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
958 int err;
959
960 BT_DBG("%s", hdev->name);
961
962 if (test_bit(HCI_UP, &hdev->flags) &&
963 hci_dev_test_flag(hdev, HCI_MGMT) &&
964 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
965 cancel_delayed_work(&hdev->power_off);
966 err = hci_powered_update_sync(hdev);
967 mgmt_power_on(hdev, err);
968 return;
969 }
970
971 err = hci_dev_do_open(hdev);
972 if (err < 0) {
973 hci_dev_lock(hdev);
974 mgmt_set_powered_failed(hdev, err);
975 hci_dev_unlock(hdev);
976 return;
977 }
978
979 /* During the HCI setup phase, a few error conditions are
980 * ignored and they need to be checked now. If they are still
981 * valid, it is important to turn the device back off.
982 */
983 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
984 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
985 (hdev->dev_type == HCI_PRIMARY &&
986 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
987 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
988 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
989 hci_dev_do_close(hdev);
990 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
991 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
992 HCI_AUTO_OFF_TIMEOUT);
993 }
994
995 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
996 /* For unconfigured devices, set the HCI_RAW flag
997 * so that userspace can easily identify them.
998 */
999 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1000 set_bit(HCI_RAW, &hdev->flags);
1001
1002 /* For fully configured devices, this will send
1003 * the Index Added event. For unconfigured devices,
1004 * it will send Unconfigured Index Added event.
1005 *
1006 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
1007 * and no event will be sent.
1008 */
1009 mgmt_index_added(hdev);
1010 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
1011 /* Now that the controller is configured, it is
1012 * important to clear the HCI_RAW flag.
1013 */
1014 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1015 clear_bit(HCI_RAW, &hdev->flags);
1016
1017 /* Powering on the controller with HCI_CONFIG set only
1018 * happens with the transition from unconfigured to
1019 * configured. This will send the Index Added event.
1020 */
1021 mgmt_index_added(hdev);
1022 }
1023 }
1024
1025 static void hci_power_off(struct work_struct *work)
1026 {
1027 struct hci_dev *hdev = container_of(work, struct hci_dev,
1028 power_off.work);
1029
1030 BT_DBG("%s", hdev->name);
1031
1032 hci_dev_do_close(hdev);
1033 }
1034
1035 static void hci_error_reset(struct work_struct *work)
1036 {
1037 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1038
1039 BT_DBG("%s", hdev->name);
1040
1041 if (hdev->hw_error)
1042 hdev->hw_error(hdev, hdev->hw_error_code);
1043 else
1044 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1045
1046 if (hci_dev_do_close(hdev))
1047 return;
1048
1049 hci_dev_do_open(hdev);
1050 }
1051
1052 void hci_uuids_clear(struct hci_dev *hdev)
1053 {
1054 struct bt_uuid *uuid, *tmp;
1055
1056 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1057 list_del(&uuid->list);
1058 kfree(uuid);
1059 }
1060 }
1061
1062 void hci_link_keys_clear(struct hci_dev *hdev)
1063 {
1064 struct link_key *key;
1065
1066 list_for_each_entry(key, &hdev->link_keys, list) {
1067 list_del_rcu(&key->list);
1068 kfree_rcu(key, rcu);
1069 }
1070 }
1071
1072 void hci_smp_ltks_clear(struct hci_dev *hdev)
1073 {
1074 struct smp_ltk *k;
1075
1076 list_for_each_entry(k, &hdev->long_term_keys, list) {
1077 list_del_rcu(&k->list);
1078 kfree_rcu(k, rcu);
1079 }
1080 }
1081
1082 void hci_smp_irks_clear(struct hci_dev *hdev)
1083 {
1084 struct smp_irk *k;
1085
1086 list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
1087 list_del_rcu(&k->list);
1088 kfree_rcu(k, rcu);
1089 }
1090 }
1091
1092 void hci_blocked_keys_clear(struct hci_dev *hdev)
1093 {
1094 struct blocked_key *b;
1095
1096 list_for_each_entry(b, &hdev->blocked_keys, list) {
1097 list_del_rcu(&b->list);
1098 kfree_rcu(b, rcu);
1099 }
1100 }
1101
1102 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1103 {
1104 bool blocked = false;
1105 struct blocked_key *b;
1106
1107 rcu_read_lock();
1108 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1109 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1110 blocked = true;
1111 break;
1112 }
1113 }
1114
1115 rcu_read_unlock();
1116 return blocked;
1117 }
1118
1119 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1120 {
1121 struct link_key *k;
1122
1123 rcu_read_lock();
1124 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1125 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1126 rcu_read_unlock();
1127
1128 if (hci_is_blocked_key(hdev,
1129 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1130 k->val)) {
1131 bt_dev_warn_ratelimited(hdev,
1132 "Link key blocked for %pMR",
1133 &k->bdaddr);
1134 return NULL;
1135 }
1136
1137 return k;
1138 }
1139 }
1140 rcu_read_unlock();
1141
1142 return NULL;
1143 }
1144
1145 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1146 u8 key_type, u8 old_key_type)
1147 {
1148 /* Legacy key */
1149 if (key_type < 0x03)
1150 return true;
1151
1152 /* Debug keys are insecure so don't store them persistently */
1153 if (key_type == HCI_LK_DEBUG_COMBINATION)
1154 return false;
1155
1156 /* Changed combination key and there's no previous one */
1157 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1158 return false;
1159
1160 /* Security mode 3 case */
1161 if (!conn)
1162 return true;
1163
1164 /* BR/EDR key derived using SC from an LE link */
1165 if (conn->type == LE_LINK)
1166 return true;
1167
1168 /* Neither local nor remote side had no-bonding as requirement */
1169 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1170 return true;
1171
1172 /* Local side had dedicated bonding as requirement */
1173 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1174 return true;
1175
1176 /* Remote side had dedicated bonding as requirement */
1177 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1178 return true;
1179
1180 /* If none of the above criteria match, then don't store the key
1181 * persistently */
1182 return false;
1183 }
1184
1185 static u8 ltk_role(u8 type)
1186 {
1187 if (type == SMP_LTK)
1188 return HCI_ROLE_MASTER;
1189
1190 return HCI_ROLE_SLAVE;
1191 }
1192
1193 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1194 u8 addr_type, u8 role)
1195 {
1196 struct smp_ltk *k;
1197
1198 rcu_read_lock();
1199 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1200 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1201 continue;
1202
1203 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1204 rcu_read_unlock();
1205
1206 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1207 k->val)) {
1208 bt_dev_warn_ratelimited(hdev,
1209 "LTK blocked for %pMR",
1210 &k->bdaddr);
1211 return NULL;
1212 }
1213
1214 return k;
1215 }
1216 }
1217 rcu_read_unlock();
1218
1219 return NULL;
1220 }
1221
1222 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1223 {
1224 struct smp_irk *irk_to_return = NULL;
1225 struct smp_irk *irk;
1226
1227 rcu_read_lock();
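/* First look for an IRK whose cached RPA already matches; only if
 * that fails, try to resolve the RPA against each stored IRK and
 * cache the result for subsequent lookups.
 */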
1228 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1229 if (!bacmp(&irk->rpa, rpa)) {
1230 irk_to_return = irk;
1231 goto done;
1232 }
1233 }
1234
1235 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1236 if (smp_irk_matches(hdev, irk->val, rpa)) {
1237 bacpy(&irk->rpa, rpa);
1238 irk_to_return = irk;
1239 goto done;
1240 }
1241 }
1242
1243 done:
1244 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1245 irk_to_return->val)) {
1246 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1247 &irk_to_return->bdaddr);
1248 irk_to_return = NULL;
1249 }
1250
1251 rcu_read_unlock();
1252
1253 return irk_to_return;
1254 }
1255
1256 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1257 u8 addr_type)
1258 {
1259 struct smp_irk *irk_to_return = NULL;
1260 struct smp_irk *irk;
1261
1262 /* Identity Address must be public or static random */
1263 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1264 return NULL;
1265
1266 rcu_read_lock();
1267 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1268 if (addr_type == irk->addr_type &&
1269 bacmp(bdaddr, &irk->bdaddr) == 0) {
1270 irk_to_return = irk;
1271 goto done;
1272 }
1273 }
1274
1275 done:
1276
1277 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1278 irk_to_return->val)) {
1279 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1280 &irk_to_return->bdaddr);
1281 irk_to_return = NULL;
1282 }
1283
1284 rcu_read_unlock();
1285
1286 return irk_to_return;
1287 }
1288
1289 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1290 bdaddr_t *bdaddr, u8 *val, u8 type,
1291 u8 pin_len, bool *persistent)
1292 {
1293 struct link_key *key, *old_key;
1294 u8 old_key_type;
1295
1296 old_key = hci_find_link_key(hdev, bdaddr);
1297 if (old_key) {
1298 old_key_type = old_key->type;
1299 key = old_key;
1300 } else {
1301 old_key_type = conn ? conn->key_type : 0xff;
1302 key = kzalloc(sizeof(*key), GFP_KERNEL);
1303 if (!key)
1304 return NULL;
1305 list_add_rcu(&key->list, &hdev->link_keys);
1306 }
1307
1308 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1309
1310 /* Some buggy controller combinations generate a changed
1311 * combination key for legacy pairing even when there's no
1312 * previous key */
1313 if (type == HCI_LK_CHANGED_COMBINATION &&
1314 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1315 type = HCI_LK_COMBINATION;
1316 if (conn)
1317 conn->key_type = type;
1318 }
1319
1320 bacpy(&key->bdaddr, bdaddr);
1321 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1322 key->pin_len = pin_len;
1323
1324 if (type == HCI_LK_CHANGED_COMBINATION)
1325 key->type = old_key_type;
1326 else
1327 key->type = type;
1328
1329 if (persistent)
1330 *persistent = hci_persistent_key(hdev, conn, type,
1331 old_key_type);
1332
1333 return key;
1334 }
1335
1336 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1337 u8 addr_type, u8 type, u8 authenticated,
1338 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1339 {
1340 struct smp_ltk *key, *old_key;
1341 u8 role = ltk_role(type);
1342
1343 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1344 if (old_key)
1345 key = old_key;
1346 else {
1347 key = kzalloc(sizeof(*key), GFP_KERNEL);
1348 if (!key)
1349 return NULL;
1350 list_add_rcu(&key->list, &hdev->long_term_keys);
1351 }
1352
1353 bacpy(&key->bdaddr, bdaddr);
1354 key->bdaddr_type = addr_type;
1355 memcpy(key->val, tk, sizeof(key->val));
1356 key->authenticated = authenticated;
1357 key->ediv = ediv;
1358 key->rand = rand;
1359 key->enc_size = enc_size;
1360 key->type = type;
1361
1362 return key;
1363 }
1364
1365 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1366 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1367 {
1368 struct smp_irk *irk;
1369
1370 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1371 if (!irk) {
1372 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1373 if (!irk)
1374 return NULL;
1375
1376 bacpy(&irk->bdaddr, bdaddr);
1377 irk->addr_type = addr_type;
1378
1379 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1380 }
1381
1382 memcpy(irk->val, val, 16);
1383 bacpy(&irk->rpa, rpa);
1384
1385 return irk;
1386 }
1387
1388 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1389 {
1390 struct link_key *key;
1391
1392 key = hci_find_link_key(hdev, bdaddr);
1393 if (!key)
1394 return -ENOENT;
1395
1396 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1397
1398 list_del_rcu(&key->list);
1399 kfree_rcu(key, rcu);
1400
1401 return 0;
1402 }
1403
1404 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1405 {
1406 struct smp_ltk *k;
1407 int removed = 0;
1408
1409 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1410 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1411 continue;
1412
1413 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1414
1415 list_del_rcu(&k->list);
1416 kfree_rcu(k, rcu);
1417 removed++;
1418 }
1419
1420 return removed ? 0 : -ENOENT;
1421 }
1422
1423 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1424 {
1425 struct smp_irk *k;
1426
1427 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
1428 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1429 continue;
1430
1431 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1432
1433 list_del_rcu(&k->list);
1434 kfree_rcu(k, rcu);
1435 }
1436 }
1437
1438 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1439 {
1440 struct smp_ltk *k;
1441 struct smp_irk *irk;
1442 u8 addr_type;
1443
1444 if (type == BDADDR_BREDR) {
1445 if (hci_find_link_key(hdev, bdaddr))
1446 return true;
1447 return false;
1448 }
1449
1450 /* Convert to HCI addr type which struct smp_ltk uses */
1451 if (type == BDADDR_LE_PUBLIC)
1452 addr_type = ADDR_LE_DEV_PUBLIC;
1453 else
1454 addr_type = ADDR_LE_DEV_RANDOM;
1455
1456 irk = hci_get_irk(hdev, bdaddr, addr_type);
1457 if (irk) {
1458 bdaddr = &irk->bdaddr;
1459 addr_type = irk->addr_type;
1460 }
1461
1462 rcu_read_lock();
1463 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1464 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1465 rcu_read_unlock();
1466 return true;
1467 }
1468 }
1469 rcu_read_unlock();
1470
1471 return false;
1472 }
1473
1474 /* HCI command timer function */
1475 static void hci_cmd_timeout(struct work_struct *work)
1476 {
1477 struct hci_dev *hdev = container_of(work, struct hci_dev,
1478 cmd_timer.work);
1479
1480 if (hdev->sent_cmd) {
1481 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1482 u16 opcode = __le16_to_cpu(sent->opcode);
1483
1484 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1485 } else {
1486 bt_dev_err(hdev, "command tx timeout");
1487 }
1488
1489 if (hdev->cmd_timeout)
1490 hdev->cmd_timeout(hdev);
1491
1492 atomic_set(&hdev->cmd_cnt, 1);
1493 queue_work(hdev->workqueue, &hdev->cmd_work);
1494 }
1495
1496 /* HCI ncmd timer function */
1497 static void hci_ncmd_timeout(struct work_struct *work)
1498 {
1499 struct hci_dev *hdev = container_of(work, struct hci_dev,
1500 ncmd_timer.work);
1501
1502 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1503
1504 /* During HCI_INIT phase no events can be injected if the ncmd timer
1505 * triggers since the procedure has its own timeout handling.
1506 */
1507 if (test_bit(HCI_INIT, &hdev->flags))
1508 return;
1509
1510 /* This is an irrecoverable state, inject hardware error event */
1511 hci_reset_dev(hdev);
1512 }
1513
1514 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1515 bdaddr_t *bdaddr, u8 bdaddr_type)
1516 {
1517 struct oob_data *data;
1518
1519 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1520 if (bacmp(bdaddr, &data->bdaddr) != 0)
1521 continue;
1522 if (data->bdaddr_type != bdaddr_type)
1523 continue;
1524 return data;
1525 }
1526
1527 return NULL;
1528 }
1529
1530 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1531 u8 bdaddr_type)
1532 {
1533 struct oob_data *data;
1534
1535 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1536 if (!data)
1537 return -ENOENT;
1538
1539 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1540
1541 list_del(&data->list);
1542 kfree(data);
1543
1544 return 0;
1545 }
1546
1547 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1548 {
1549 struct oob_data *data, *n;
1550
1551 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1552 list_del(&data->list);
1553 kfree(data);
1554 }
1555 }
1556
1557 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1558 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1559 u8 *hash256, u8 *rand256)
1560 {
1561 struct oob_data *data;
1562
1563 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1564 if (!data) {
1565 data = kmalloc(sizeof(*data), GFP_KERNEL);
1566 if (!data)
1567 return -ENOMEM;
1568
1569 bacpy(&data->bdaddr, bdaddr);
1570 data->bdaddr_type = bdaddr_type;
1571 list_add(&data->list, &hdev->remote_oob_data);
1572 }
1573
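/* data->present records which OOB values are valid:
 * 0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both.
 */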
1574 if (hash192 && rand192) {
1575 memcpy(data->hash192, hash192, sizeof(data->hash192));
1576 memcpy(data->rand192, rand192, sizeof(data->rand192));
1577 if (hash256 && rand256)
1578 data->present = 0x03;
1579 } else {
1580 memset(data->hash192, 0, sizeof(data->hash192));
1581 memset(data->rand192, 0, sizeof(data->rand192));
1582 if (hash256 && rand256)
1583 data->present = 0x02;
1584 else
1585 data->present = 0x00;
1586 }
1587
1588 if (hash256 && rand256) {
1589 memcpy(data->hash256, hash256, sizeof(data->hash256));
1590 memcpy(data->rand256, rand256, sizeof(data->rand256));
1591 } else {
1592 memset(data->hash256, 0, sizeof(data->hash256));
1593 memset(data->rand256, 0, sizeof(data->rand256));
1594 if (hash192 && rand192)
1595 data->present = 0x01;
1596 }
1597
1598 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1599
1600 return 0;
1601 }
1602
1603 /* This function requires the caller holds hdev->lock */
1604 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1605 {
1606 struct adv_info *adv_instance;
1607
1608 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1609 if (adv_instance->instance == instance)
1610 return adv_instance;
1611 }
1612
1613 return NULL;
1614 }
1615
1616 /* This function requires the caller holds hdev->lock */
1617 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1618 {
1619 struct adv_info *cur_instance;
1620
1621 cur_instance = hci_find_adv_instance(hdev, instance);
1622 if (!cur_instance)
1623 return NULL;
1624
1625 if (cur_instance == list_last_entry(&hdev->adv_instances,
1626 struct adv_info, list))
1627 return list_first_entry(&hdev->adv_instances,
1628 struct adv_info, list);
1629 else
1630 return list_next_entry(cur_instance, list);
1631 }
1632
1633 /* This function requires the caller holds hdev->lock */
1634 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1635 {
1636 struct adv_info *adv_instance;
1637
1638 adv_instance = hci_find_adv_instance(hdev, instance);
1639 if (!adv_instance)
1640 return -ENOENT;
1641
1642 BT_DBG("%s removing %dMR", hdev->name, instance);
1643
1644 if (hdev->cur_adv_instance == instance) {
1645 if (hdev->adv_instance_timeout) {
1646 cancel_delayed_work(&hdev->adv_instance_expire);
1647 hdev->adv_instance_timeout = 0;
1648 }
1649 hdev->cur_adv_instance = 0x00;
1650 }
1651
1652 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1653
1654 list_del(&adv_instance->list);
1655 kfree(adv_instance);
1656
1657 hdev->adv_instance_cnt--;
1658
1659 return 0;
1660 }
1661
1662 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1663 {
1664 struct adv_info *adv_instance, *n;
1665
1666 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1667 adv_instance->rpa_expired = rpa_expired;
1668 }
1669
1670 /* This function requires the caller holds hdev->lock */
1671 void hci_adv_instances_clear(struct hci_dev *hdev)
1672 {
1673 struct adv_info *adv_instance, *n;
1674
1675 if (hdev->adv_instance_timeout) {
1676 cancel_delayed_work(&hdev->adv_instance_expire);
1677 hdev->adv_instance_timeout = 0;
1678 }
1679
1680 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1681 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1682 list_del(&adv_instance->list);
1683 kfree(adv_instance);
1684 }
1685
1686 hdev->adv_instance_cnt = 0;
1687 hdev->cur_adv_instance = 0x00;
1688 }
1689
1690 static void adv_instance_rpa_expired(struct work_struct *work)
1691 {
1692 struct adv_info *adv_instance = container_of(work, struct adv_info,
1693 rpa_expired_cb.work);
1694
1695 BT_DBG("");
1696
1697 adv_instance->rpa_expired = true;
1698 }
1699
1700 /* This function requires the caller holds hdev->lock */
1701 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
1702 u16 adv_data_len, u8 *adv_data,
1703 u16 scan_rsp_len, u8 *scan_rsp_data,
1704 u16 timeout, u16 duration, s8 tx_power,
1705 u32 min_interval, u32 max_interval)
1706 {
1707 struct adv_info *adv_instance;
1708
1709 adv_instance = hci_find_adv_instance(hdev, instance);
1710 if (adv_instance) {
1711 memset(adv_instance->adv_data, 0,
1712 sizeof(adv_instance->adv_data));
1713 memset(adv_instance->scan_rsp_data, 0,
1714 sizeof(adv_instance->scan_rsp_data));
1715 } else {
1716 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1717 instance < 1 || instance > hdev->le_num_of_adv_sets)
1718 return -EOVERFLOW;
1719
1720 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
1721 if (!adv_instance)
1722 return -ENOMEM;
1723
1724 adv_instance->pending = true;
1725 adv_instance->instance = instance;
1726 list_add(&adv_instance->list, &hdev->adv_instances);
1727 hdev->adv_instance_cnt++;
1728 }
1729
1730 adv_instance->flags = flags;
1731 adv_instance->adv_data_len = adv_data_len;
1732 adv_instance->scan_rsp_len = scan_rsp_len;
1733 adv_instance->min_interval = min_interval;
1734 adv_instance->max_interval = max_interval;
1735 adv_instance->tx_power = tx_power;
1736
1737 if (adv_data_len)
1738 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
1739
1740 if (scan_rsp_len)
1741 memcpy(adv_instance->scan_rsp_data,
1742 scan_rsp_data, scan_rsp_len);
1743
1744 adv_instance->timeout = timeout;
1745 adv_instance->remaining_time = timeout;
1746
1747 if (duration == 0)
1748 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
1749 else
1750 adv_instance->duration = duration;
1751
1752 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
1753 adv_instance_rpa_expired);
1754
1755 BT_DBG("%s for %dMR", hdev->name, instance);
1756
1757 return 0;
1758 }
1759
1760 /* This function requires the caller holds hdev->lock */
1761 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1762 u16 adv_data_len, u8 *adv_data,
1763 u16 scan_rsp_len, u8 *scan_rsp_data)
1764 {
1765 struct adv_info *adv_instance;
1766
1767 adv_instance = hci_find_adv_instance(hdev, instance);
1768
1769 /* If advertisement doesn't exist, we can't modify its data */
1770 if (!adv_instance)
1771 return -ENOENT;
1772
1773 if (adv_data_len) {
1774 memset(adv_instance->adv_data, 0,
1775 sizeof(adv_instance->adv_data));
1776 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
1777 adv_instance->adv_data_len = adv_data_len;
1778 }
1779
1780 if (scan_rsp_len) {
1781 memset(adv_instance->scan_rsp_data, 0,
1782 sizeof(adv_instance->scan_rsp_data));
1783 memcpy(adv_instance->scan_rsp_data,
1784 scan_rsp_data, scan_rsp_len);
1785 adv_instance->scan_rsp_len = scan_rsp_len;
1786 }
1787
1788 return 0;
1789 }
1790
1791 /* This function requires the caller holds hdev->lock */
1792 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1793 {
1794 u32 flags;
1795 struct adv_info *adv;
1796
1797 if (instance == 0x00) {
1798 /* Instance 0 always manages the "Tx Power" and "Flags"
1799 * fields
1800 */
1801 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1802
1803 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1804 * corresponds to the "connectable" instance flag.
1805 */
1806 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1807 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1808
1809 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1810 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1811 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1812 flags |= MGMT_ADV_FLAG_DISCOV;
1813
1814 return flags;
1815 }
1816
1817 adv = hci_find_adv_instance(hdev, instance);
1818
1819 /* Return 0 when we got an invalid instance identifier. */
1820 if (!adv)
1821 return 0;
1822
1823 return adv->flags;
1824 }
1825
1826 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1827 {
1828 struct adv_info *adv;
1829
1830 /* Instance 0x00 always sets the local name */
1831 if (instance == 0x00)
1832 return true;
1833
1834 adv = hci_find_adv_instance(hdev, instance);
1835 if (!adv)
1836 return false;
1837
1838 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1839 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1840 return true;
1841
1842 return adv->scan_rsp_len ? true : false;
1843 }
1844
1845 /* This function requires the caller holds hdev->lock */
1846 void hci_adv_monitors_clear(struct hci_dev *hdev)
1847 {
1848 struct adv_monitor *monitor;
1849 int handle;
1850
1851 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1852 hci_free_adv_monitor(hdev, monitor);
1853
1854 idr_destroy(&hdev->adv_monitors_idr);
1855 }
1856
1857 /* Frees the monitor structure and does some bookkeeping.
1858 * This function requires the caller holds hdev->lock.
1859 */
1860 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1861 {
1862 struct adv_pattern *pattern;
1863 struct adv_pattern *tmp;
1864
1865 if (!monitor)
1866 return;
1867
1868 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1869 list_del(&pattern->list);
1870 kfree(pattern);
1871 }
1872
1873 if (monitor->handle)
1874 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1875
1876 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1877 hdev->adv_monitors_cnt--;
1878 mgmt_adv_monitor_removed(hdev, monitor->handle);
1879 }
1880
1881 kfree(monitor);
1882 }
1883
1884 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
1885 {
1886 return mgmt_add_adv_patterns_monitor_complete(hdev, status);
1887 }
1888
1889 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
1890 {
1891 return mgmt_remove_adv_monitor_complete(hdev, status);
1892 }
1893
1894 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1895 * also attempts to forward the request to the controller.
1896 * Returns true if request is forwarded (result is pending), false otherwise.
1897 * This function requires the caller holds hdev->lock.
1898 */
1899 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
1900 int *err)
1901 {
1902 int min, max, handle;
1903
1904 *err = 0;
1905
1906 if (!monitor) {
1907 *err = -EINVAL;
1908 return false;
1909 }
1910
1911 min = HCI_MIN_ADV_MONITOR_HANDLE;
1912 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1913 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1914 GFP_KERNEL);
1915 if (handle < 0) {
1916 *err = handle;
1917 return false;
1918 }
1919
1920 monitor->handle = handle;
1921
1922 if (!hdev_is_powered(hdev))
1923 return false;
1924
1925 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1926 case HCI_ADV_MONITOR_EXT_NONE:
1927 hci_update_passive_scan(hdev);
1928 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
1929 /* Message was not forwarded to controller - not an error */
1930 return false;
1931 case HCI_ADV_MONITOR_EXT_MSFT:
1932 *err = msft_add_monitor_pattern(hdev, monitor);
1933 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
1934 *err);
1935 break;
1936 }
1937
1938 return (*err == 0);
1939 }
1940
1941 /* Attempts to tell the controller and free the monitor. If somehow the
1942 * controller doesn't have a corresponding handle, remove anyway.
1943 * Returns true if request is forwarded (result is pending), false otherwise.
1944 * This function requires the caller holds hdev->lock.
1945 */
1946 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
1947 struct adv_monitor *monitor,
1948 u16 handle, int *err)
1949 {
1950 *err = 0;
1951
1952 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1953 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1954 goto free_monitor;
1955 case HCI_ADV_MONITOR_EXT_MSFT:
1956 *err = msft_remove_monitor(hdev, monitor, handle);
1957 break;
1958 }
1959
1960 /* In case no matching handle registered, just free the monitor */
1961 if (*err == -ENOENT)
1962 goto free_monitor;
1963
1964 return (*err == 0);
1965
1966 free_monitor:
1967 if (*err == -ENOENT)
1968 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1969 monitor->handle);
1970 hci_free_adv_monitor(hdev, monitor);
1971
1972 *err = 0;
1973 return false;
1974 }
1975
1976 /* Returns true if request is forwarded (result is pending), false otherwise.
1977 * This function requires the caller holds hdev->lock.
1978 */
1979 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
1980 {
1981 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1982 bool pending;
1983
1984 if (!monitor) {
1985 *err = -EINVAL;
1986 return false;
1987 }
1988
1989 pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
1990 if (!*err && !pending)
1991 hci_update_passive_scan(hdev);
1992
1993 bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
1994 hdev->name, handle, *err, pending ? "" : "not ");
1995
1996 return pending;
1997 }
1998
1999 /* Returns true if request is forwarded (result is pending), false otherwise.
2000 * This function requires the caller holds hdev->lock.
2001 */
2002 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
2003 {
2004 struct adv_monitor *monitor;
2005 int idr_next_id = 0;
2006 bool pending = false;
2007 bool update = false;
2008
2009 *err = 0;
2010
2011 while (!*err && !pending) {
2012 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2013 if (!monitor)
2014 break;
2015
2016 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
2017
2018 if (!*err && !pending)
2019 update = true;
2020 }
2021
2022 if (update)
2023 hci_update_passive_scan(hdev);
2024
2025 bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
2026 hdev->name, *err, pending ? "" : "not ");
2027
2028 return pending;
2029 }
2030
2031 /* This function requires the caller holds hdev->lock */
2032 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2033 {
2034 return !idr_is_empty(&hdev->adv_monitors_idr);
2035 }
2036
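/* Reports which advertisement monitor offload extension the controller
 * provides: HCI_ADV_MONITOR_EXT_MSFT when the MSFT extension is supported,
 * HCI_ADV_MONITOR_EXT_NONE otherwise.
 */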
2037 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2038 {
2039 if (msft_monitor_supported(hdev))
2040 return HCI_ADV_MONITOR_EXT_MSFT;
2041
2042 return HCI_ADV_MONITOR_EXT_NONE;
2043 }
2044
2045 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2046 bdaddr_t *bdaddr, u8 type)
2047 {
2048 struct bdaddr_list *b;
2049
2050 list_for_each_entry(b, bdaddr_list, list) {
2051 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2052 return b;
2053 }
2054
2055 return NULL;
2056 }
2057
2058 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2059 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2060 u8 type)
2061 {
2062 struct bdaddr_list_with_irk *b;
2063
2064 list_for_each_entry(b, bdaddr_list, list) {
2065 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2066 return b;
2067 }
2068
2069 return NULL;
2070 }
2071
2072 struct bdaddr_list_with_flags *
2073 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2074 bdaddr_t *bdaddr, u8 type)
2075 {
2076 struct bdaddr_list_with_flags *b;
2077
2078 list_for_each_entry(b, bdaddr_list, list) {
2079 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2080 return b;
2081 }
2082
2083 return NULL;
2084 }
2085
2086 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2087 {
2088 struct bdaddr_list *b, *n;
2089
2090 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2091 list_del(&b->list);
2092 kfree(b);
2093 }
2094 }
2095
2096 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2097 {
2098 struct bdaddr_list *entry;
2099
2100 if (!bacmp(bdaddr, BDADDR_ANY))
2101 return -EBADF;
2102
2103 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2104 return -EEXIST;
2105
2106 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2107 if (!entry)
2108 return -ENOMEM;
2109
2110 bacpy(&entry->bdaddr, bdaddr);
2111 entry->bdaddr_type = type;
2112
2113 list_add(&entry->list, list);
2114
2115 return 0;
2116 }
2117
2118 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2119 u8 type, u8 *peer_irk, u8 *local_irk)
2120 {
2121 struct bdaddr_list_with_irk *entry;
2122
2123 if (!bacmp(bdaddr, BDADDR_ANY))
2124 return -EBADF;
2125
2126 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2127 return -EEXIST;
2128
2129 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2130 if (!entry)
2131 return -ENOMEM;
2132
2133 bacpy(&entry->bdaddr, bdaddr);
2134 entry->bdaddr_type = type;
2135
2136 if (peer_irk)
2137 memcpy(entry->peer_irk, peer_irk, 16);
2138
2139 if (local_irk)
2140 memcpy(entry->local_irk, local_irk, 16);
2141
2142 list_add(&entry->list, list);
2143
2144 return 0;
2145 }
2146
2147 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2148 u8 type, u32 flags)
2149 {
2150 struct bdaddr_list_with_flags *entry;
2151
2152 if (!bacmp(bdaddr, BDADDR_ANY))
2153 return -EBADF;
2154
2155 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2156 return -EEXIST;
2157
2158 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2159 if (!entry)
2160 return -ENOMEM;
2161
2162 bacpy(&entry->bdaddr, bdaddr);
2163 entry->bdaddr_type = type;
2164 entry->flags = flags;
2165
2166 list_add(&entry->list, list);
2167
2168 return 0;
2169 }
2170
2171 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2172 {
2173 struct bdaddr_list *entry;
2174
2175 if (!bacmp(bdaddr, BDADDR_ANY)) {
2176 hci_bdaddr_list_clear(list);
2177 return 0;
2178 }
2179
2180 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2181 if (!entry)
2182 return -ENOENT;
2183
2184 list_del(&entry->list);
2185 kfree(entry);
2186
2187 return 0;
2188 }
2189
2190 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2191 u8 type)
2192 {
2193 struct bdaddr_list_with_irk *entry;
2194
2195 if (!bacmp(bdaddr, BDADDR_ANY)) {
2196 hci_bdaddr_list_clear(list);
2197 return 0;
2198 }
2199
2200 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2201 if (!entry)
2202 return -ENOENT;
2203
2204 list_del(&entry->list);
2205 kfree(entry);
2206
2207 return 0;
2208 }
2209
2210 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2211 u8 type)
2212 {
2213 struct bdaddr_list_with_flags *entry;
2214
2215 if (!bacmp(bdaddr, BDADDR_ANY)) {
2216 hci_bdaddr_list_clear(list);
2217 return 0;
2218 }
2219
2220 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2221 if (!entry)
2222 return -ENOENT;
2223
2224 list_del(&entry->list);
2225 kfree(entry);
2226
2227 return 0;
2228 }
2229
2230 /* This function requires the caller holds hdev->lock */
2231 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2232 bdaddr_t *addr, u8 addr_type)
2233 {
2234 struct hci_conn_params *params;
2235
2236 list_for_each_entry(params, &hdev->le_conn_params, list) {
2237 		if (bacmp(&params->addr, addr) == 0 &&
2238 params->addr_type == addr_type) {
2239 return params;
2240 }
2241 }
2242
2243 return NULL;
2244 }
2245
2246 /* This function requires the caller holds hdev->lock */
2247 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2248 bdaddr_t *addr, u8 addr_type)
2249 {
2250 struct hci_conn_params *param;
2251
2252 list_for_each_entry(param, list, action) {
2253 		if (bacmp(&param->addr, addr) == 0 &&
2254 param->addr_type == addr_type)
2255 return param;
2256 }
2257
2258 return NULL;
2259 }
2260
2261 /* This function requires the caller holds hdev->lock */
2262 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2263 bdaddr_t *addr, u8 addr_type)
2264 {
2265 struct hci_conn_params *params;
2266
2267 params = hci_conn_params_lookup(hdev, addr, addr_type);
2268 if (params)
2269 return params;
2270
2271 params = kzalloc(sizeof(*params), GFP_KERNEL);
2272 if (!params) {
2273 bt_dev_err(hdev, "out of memory");
2274 return NULL;
2275 }
2276
2277 	bacpy(&params->addr, addr);
2278 params->addr_type = addr_type;
2279
2280 	list_add(&params->list, &hdev->le_conn_params);
2281 	INIT_LIST_HEAD(&params->action);
2282
2283 params->conn_min_interval = hdev->le_conn_min_interval;
2284 params->conn_max_interval = hdev->le_conn_max_interval;
2285 params->conn_latency = hdev->le_conn_latency;
2286 params->supervision_timeout = hdev->le_supv_timeout;
2287 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2288
2289 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2290
2291 return params;
2292 }
2293
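/* Drops the connection reference held by the params (if any), unlinks them
 * from the action and le_conn_params lists and frees them.
 */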
2294 static void hci_conn_params_free(struct hci_conn_params *params)
2295 {
2296 if (params->conn) {
2297 hci_conn_drop(params->conn);
2298 hci_conn_put(params->conn);
2299 }
2300
2301 	list_del(&params->action);
2302 	list_del(&params->list);
2303 kfree(params);
2304 }
2305
2306 /* This function requires the caller holds hdev->lock */
2307 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2308 {
2309 struct hci_conn_params *params;
2310
2311 params = hci_conn_params_lookup(hdev, addr, addr_type);
2312 if (!params)
2313 return;
2314
2315 hci_conn_params_free(params);
2316
2317 hci_update_passive_scan(hdev);
2318
2319 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2320 }
2321
2322 /* This function requires the caller holds hdev->lock */
2323 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2324 {
2325 struct hci_conn_params *params, *tmp;
2326
2327 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2328 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2329 continue;
2330
2331 		/* If trying to establish a one-time connection to a disabled
2332 		 * device, leave the params but mark them as explicit (just once).
2333 */
2334 if (params->explicit_connect) {
2335 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2336 continue;
2337 }
2338
2339 		list_del(&params->list);
2340 kfree(params);
2341 }
2342
2343 BT_DBG("All LE disabled connection parameters were removed");
2344 }
2345
2346 /* This function requires the caller holds hdev->lock */
2347 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2348 {
2349 struct hci_conn_params *params, *tmp;
2350
2351 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2352 hci_conn_params_free(params);
2353
2354 BT_DBG("All LE connection parameters were removed");
2355 }
2356
2357 /* Copy the Identity Address of the controller.
2358 *
2359 * If the controller has a public BD_ADDR, then by default use that one.
2360 * If this is a LE only controller without a public address, default to
2361 * the static random address.
2362 *
2363 * For debugging purposes it is possible to force controllers with a
2364 * public address to use the static random address instead.
2365 *
2366 * In case BR/EDR has been disabled on a dual-mode controller and
2367 * userspace has configured a static address, then that address
2368 * becomes the identity address instead of the public BR/EDR address.
2369 */
2370 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2371 u8 *bdaddr_type)
2372 {
2373 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2374 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2375 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2376 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2377 bacpy(bdaddr, &hdev->static_addr);
2378 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2379 } else {
2380 bacpy(bdaddr, &hdev->bdaddr);
2381 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2382 }
2383 }
2384
2385 static void hci_clear_wake_reason(struct hci_dev *hdev)
2386 {
2387 hci_dev_lock(hdev);
2388
2389 hdev->wake_reason = 0;
2390 bacpy(&hdev->wake_addr, BDADDR_ANY);
2391 hdev->wake_addr_type = 0;
2392
2393 hci_dev_unlock(hdev);
2394 }
2395
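/* PM notifier callback: maps PM_SUSPEND_PREPARE to hci_suspend_dev() and
 * PM_POST_SUSPEND to hci_resume_dev(), logging any failure.
 */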
2396 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2397 void *data)
2398 {
2399 struct hci_dev *hdev =
2400 container_of(nb, struct hci_dev, suspend_notifier);
2401 int ret = 0;
2402
2403 if (action == PM_SUSPEND_PREPARE)
2404 ret = hci_suspend_dev(hdev);
2405 else if (action == PM_POST_SUSPEND)
2406 ret = hci_resume_dev(hdev);
2407
2408 if (ret)
2409 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2410 action, ret);
2411
2412 return NOTIFY_DONE;
2413 }
2414
2415 /* Alloc HCI device */
2416 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2417 {
2418 struct hci_dev *hdev;
2419 unsigned int alloc_size;
2420
2421 alloc_size = sizeof(*hdev);
2422 if (sizeof_priv) {
2423 /* Fixme: May need ALIGN-ment? */
2424 alloc_size += sizeof_priv;
2425 }
2426
2427 hdev = kzalloc(alloc_size, GFP_KERNEL);
2428 if (!hdev)
2429 return NULL;
2430
2431 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2432 hdev->esco_type = (ESCO_HV1);
2433 hdev->link_mode = (HCI_LM_ACCEPT);
2434 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2435 hdev->io_capability = 0x03; /* No Input No Output */
2436 hdev->manufacturer = 0xffff; /* Default to internal use */
2437 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2438 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2439 hdev->adv_instance_cnt = 0;
2440 hdev->cur_adv_instance = 0x00;
2441 hdev->adv_instance_timeout = 0;
2442
2443 hdev->advmon_allowlist_duration = 300;
2444 hdev->advmon_no_filter_duration = 500;
2445 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2446
2447 hdev->sniff_max_interval = 800;
2448 hdev->sniff_min_interval = 80;
2449
2450 hdev->le_adv_channel_map = 0x07;
2451 hdev->le_adv_min_interval = 0x0800;
2452 hdev->le_adv_max_interval = 0x0800;
2453 hdev->le_scan_interval = 0x0060;
2454 hdev->le_scan_window = 0x0030;
2455 hdev->le_scan_int_suspend = 0x0400;
2456 hdev->le_scan_window_suspend = 0x0012;
2457 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2458 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2459 hdev->le_scan_int_adv_monitor = 0x0060;
2460 hdev->le_scan_window_adv_monitor = 0x0030;
2461 hdev->le_scan_int_connect = 0x0060;
2462 hdev->le_scan_window_connect = 0x0060;
2463 hdev->le_conn_min_interval = 0x0018;
2464 hdev->le_conn_max_interval = 0x0028;
2465 hdev->le_conn_latency = 0x0000;
2466 hdev->le_supv_timeout = 0x002a;
2467 hdev->le_def_tx_len = 0x001b;
2468 hdev->le_def_tx_time = 0x0148;
2469 hdev->le_max_tx_len = 0x001b;
2470 hdev->le_max_tx_time = 0x0148;
2471 hdev->le_max_rx_len = 0x001b;
2472 hdev->le_max_rx_time = 0x0148;
2473 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2474 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2475 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2476 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2477 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2478 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2479 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2480 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2481 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2482
2483 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2484 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2485 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2486 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2487 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2488 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2489
2490 /* default 1.28 sec page scan */
2491 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2492 hdev->def_page_scan_int = 0x0800;
2493 hdev->def_page_scan_window = 0x0012;
2494
2495 mutex_init(&hdev->lock);
2496 mutex_init(&hdev->req_lock);
2497
2498 INIT_LIST_HEAD(&hdev->mgmt_pending);
2499 INIT_LIST_HEAD(&hdev->reject_list);
2500 INIT_LIST_HEAD(&hdev->accept_list);
2501 INIT_LIST_HEAD(&hdev->uuids);
2502 INIT_LIST_HEAD(&hdev->link_keys);
2503 INIT_LIST_HEAD(&hdev->long_term_keys);
2504 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2505 INIT_LIST_HEAD(&hdev->remote_oob_data);
2506 INIT_LIST_HEAD(&hdev->le_accept_list);
2507 INIT_LIST_HEAD(&hdev->le_resolv_list);
2508 INIT_LIST_HEAD(&hdev->le_conn_params);
2509 INIT_LIST_HEAD(&hdev->pend_le_conns);
2510 INIT_LIST_HEAD(&hdev->pend_le_reports);
2511 INIT_LIST_HEAD(&hdev->conn_hash.list);
2512 INIT_LIST_HEAD(&hdev->adv_instances);
2513 INIT_LIST_HEAD(&hdev->blocked_keys);
2514 INIT_LIST_HEAD(&hdev->monitored_devices);
2515
2516 INIT_LIST_HEAD(&hdev->local_codecs);
2517 INIT_WORK(&hdev->rx_work, hci_rx_work);
2518 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2519 INIT_WORK(&hdev->tx_work, hci_tx_work);
2520 INIT_WORK(&hdev->power_on, hci_power_on);
2521 INIT_WORK(&hdev->error_reset, hci_error_reset);
2522
2523 hci_cmd_sync_init(hdev);
2524
2525 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2526
2527 skb_queue_head_init(&hdev->rx_q);
2528 skb_queue_head_init(&hdev->cmd_q);
2529 skb_queue_head_init(&hdev->raw_q);
2530
2531 init_waitqueue_head(&hdev->req_wait_q);
2532
2533 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2534 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2535
2536 hci_request_setup(hdev);
2537
2538 hci_init_sysfs(hdev);
2539 discovery_init(hdev);
2540
2541 return hdev;
2542 }
2543 EXPORT_SYMBOL(hci_alloc_dev_priv);
2544
2545 /* Free HCI device */
2546 void hci_free_dev(struct hci_dev *hdev)
2547 {
2548 	/* Will be freed via the device release function */
2549 put_device(&hdev->dev);
2550 }
2551 EXPORT_SYMBOL(hci_free_dev);
2552
2553 /* Register HCI device */
2554 int hci_register_dev(struct hci_dev *hdev)
2555 {
2556 int id, error;
2557
2558 if (!hdev->open || !hdev->close || !hdev->send)
2559 return -EINVAL;
2560
2561 /* Do not allow HCI_AMP devices to register at index 0,
2562 * so the index can be used as the AMP controller ID.
2563 */
2564 switch (hdev->dev_type) {
2565 case HCI_PRIMARY:
2566 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2567 break;
2568 case HCI_AMP:
2569 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2570 break;
2571 default:
2572 return -EINVAL;
2573 }
2574
2575 if (id < 0)
2576 return id;
2577
2578 snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
2579 hdev->id = id;
2580
2581 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2582
2583 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2584 if (!hdev->workqueue) {
2585 error = -ENOMEM;
2586 goto err;
2587 }
2588
2589 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2590 hdev->name);
2591 if (!hdev->req_workqueue) {
2592 destroy_workqueue(hdev->workqueue);
2593 error = -ENOMEM;
2594 goto err;
2595 }
2596
2597 if (!IS_ERR_OR_NULL(bt_debugfs))
2598 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2599
2600 dev_set_name(&hdev->dev, "%s", hdev->name);
2601
2602 error = device_add(&hdev->dev);
2603 if (error < 0)
2604 goto err_wqueue;
2605
2606 hci_leds_init(hdev);
2607
2608 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2609 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2610 hdev);
2611 if (hdev->rfkill) {
2612 if (rfkill_register(hdev->rfkill) < 0) {
2613 rfkill_destroy(hdev->rfkill);
2614 hdev->rfkill = NULL;
2615 }
2616 }
2617
2618 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2619 hci_dev_set_flag(hdev, HCI_RFKILLED);
2620
2621 hci_dev_set_flag(hdev, HCI_SETUP);
2622 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2623
2624 if (hdev->dev_type == HCI_PRIMARY) {
2625 /* Assume BR/EDR support until proven otherwise (such as
2626 		 * through reading the supported features during init).
2627 */
2628 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2629 }
2630
2631 write_lock(&hci_dev_list_lock);
2632 list_add(&hdev->list, &hci_dev_list);
2633 write_unlock(&hci_dev_list_lock);
2634
2635 /* Devices that are marked for raw-only usage are unconfigured
2636 * and should not be included in normal operation.
2637 */
2638 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2639 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2640
2641 /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2642 * callback.
2643 */
2644 if (hdev->wakeup)
2645 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2646
2647 hci_sock_dev_event(hdev, HCI_DEV_REG);
2648 hci_dev_hold(hdev);
2649
2650 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2651 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2652 error = register_pm_notifier(&hdev->suspend_notifier);
2653 if (error)
2654 goto err_wqueue;
2655 }
2656
2657 queue_work(hdev->req_workqueue, &hdev->power_on);
2658
2659 idr_init(&hdev->adv_monitors_idr);
2660 msft_register(hdev);
2661
2662 return id;
2663
2664 err_wqueue:
2665 debugfs_remove_recursive(hdev->debugfs);
2666 destroy_workqueue(hdev->workqueue);
2667 destroy_workqueue(hdev->req_workqueue);
2668 err:
2669 ida_simple_remove(&hci_index_ida, hdev->id);
2670
2671 return error;
2672 }
2673 EXPORT_SYMBOL(hci_register_dev);
2674
2675 /* Unregister HCI device */
2676 void hci_unregister_dev(struct hci_dev *hdev)
2677 {
2678 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2679
2680 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2681
2682 write_lock(&hci_dev_list_lock);
2683 list_del(&hdev->list);
2684 write_unlock(&hci_dev_list_lock);
2685
2686 cancel_work_sync(&hdev->power_on);
2687
2688 hci_cmd_sync_clear(hdev);
2689
2690 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
2691 unregister_pm_notifier(&hdev->suspend_notifier);
2692
2693 msft_unregister(hdev);
2694
2695 hci_dev_do_close(hdev);
2696
2697 if (!test_bit(HCI_INIT, &hdev->flags) &&
2698 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2699 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2700 hci_dev_lock(hdev);
2701 mgmt_index_removed(hdev);
2702 hci_dev_unlock(hdev);
2703 }
2704
2705 /* mgmt_index_removed should take care of emptying the
2706 * pending list */
2707 BUG_ON(!list_empty(&hdev->mgmt_pending));
2708
2709 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2710
2711 if (hdev->rfkill) {
2712 rfkill_unregister(hdev->rfkill);
2713 rfkill_destroy(hdev->rfkill);
2714 }
2715
2716 device_del(&hdev->dev);
2717 /* Actual cleanup is deferred until hci_release_dev(). */
2718 hci_dev_put(hdev);
2719 }
2720 EXPORT_SYMBOL(hci_unregister_dev);
2721
2722 /* Release HCI device */
2723 void hci_release_dev(struct hci_dev *hdev)
2724 {
2725 debugfs_remove_recursive(hdev->debugfs);
2726 kfree_const(hdev->hw_info);
2727 kfree_const(hdev->fw_info);
2728
2729 destroy_workqueue(hdev->workqueue);
2730 destroy_workqueue(hdev->req_workqueue);
2731
2732 hci_dev_lock(hdev);
2733 hci_bdaddr_list_clear(&hdev->reject_list);
2734 hci_bdaddr_list_clear(&hdev->accept_list);
2735 hci_uuids_clear(hdev);
2736 hci_link_keys_clear(hdev);
2737 hci_smp_ltks_clear(hdev);
2738 hci_smp_irks_clear(hdev);
2739 hci_remote_oob_data_clear(hdev);
2740 hci_adv_instances_clear(hdev);
2741 hci_adv_monitors_clear(hdev);
2742 hci_bdaddr_list_clear(&hdev->le_accept_list);
2743 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2744 hci_conn_params_clear_all(hdev);
2745 hci_discovery_filter_clear(hdev);
2746 hci_blocked_keys_clear(hdev);
2747 hci_dev_unlock(hdev);
2748
2749 ida_simple_remove(&hci_index_ida, hdev->id);
2750 kfree_skb(hdev->sent_cmd);
2751 kfree(hdev);
2752 }
2753 EXPORT_SYMBOL(hci_release_dev);
2754
2755 /* Suspend HCI device */
2756 int hci_suspend_dev(struct hci_dev *hdev)
2757 {
2758 int ret;
2759
2760 bt_dev_dbg(hdev, "");
2761
2762 	/* Suspend should only act when the device is powered. */
2763 if (!hdev_is_powered(hdev) ||
2764 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2765 return 0;
2766
2767 /* If powering down don't attempt to suspend */
2768 if (mgmt_powering_down(hdev))
2769 return 0;
2770
2771 hci_req_sync_lock(hdev);
2772 ret = hci_suspend_sync(hdev);
2773 hci_req_sync_unlock(hdev);
2774
2775 hci_clear_wake_reason(hdev);
2776 mgmt_suspending(hdev, hdev->suspend_state);
2777
2778 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2779 return ret;
2780 }
2781 EXPORT_SYMBOL(hci_suspend_dev);
2782
2783 /* Resume HCI device */
2784 int hci_resume_dev(struct hci_dev *hdev)
2785 {
2786 int ret;
2787
2788 bt_dev_dbg(hdev, "");
2789
2790 	/* Resume should only act when the device is powered. */
2791 if (!hdev_is_powered(hdev) ||
2792 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2793 return 0;
2794
2795 /* If powering down don't attempt to resume */
2796 if (mgmt_powering_down(hdev))
2797 return 0;
2798
2799 hci_req_sync_lock(hdev);
2800 ret = hci_resume_sync(hdev);
2801 hci_req_sync_unlock(hdev);
2802
2803 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2804 hdev->wake_addr_type);
2805
2806 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2807 return ret;
2808 }
2809 EXPORT_SYMBOL(hci_resume_dev);
2810
2811 /* Reset HCI device */
2812 int hci_reset_dev(struct hci_dev *hdev)
2813 {
2814 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2815 struct sk_buff *skb;
2816
2817 skb = bt_skb_alloc(3, GFP_ATOMIC);
2818 if (!skb)
2819 return -ENOMEM;
2820
2821 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2822 skb_put_data(skb, hw_err, 3);
2823
2824 bt_dev_err(hdev, "Injecting HCI hardware error event");
2825
2826 /* Send Hardware Error to upper stack */
2827 return hci_recv_frame(hdev, skb);
2828 }
2829 EXPORT_SYMBOL(hci_reset_dev);
2830
2831 /* Receive frame from HCI drivers */
2832 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2833 {
2834 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2835 && !test_bit(HCI_INIT, &hdev->flags))) {
2836 kfree_skb(skb);
2837 return -ENXIO;
2838 }
2839
2840 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
2841 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
2842 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
2843 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
2844 kfree_skb(skb);
2845 return -EINVAL;
2846 }
2847
2848 /* Incoming skb */
2849 bt_cb(skb)->incoming = 1;
2850
2851 /* Time stamp */
2852 __net_timestamp(skb);
2853
2854 skb_queue_tail(&hdev->rx_q, skb);
2855 queue_work(hdev->workqueue, &hdev->rx_work);
2856
2857 return 0;
2858 }
2859 EXPORT_SYMBOL(hci_recv_frame);
2860
2861 /* Receive diagnostic message from HCI drivers */
2862 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2863 {
2864 /* Mark as diagnostic packet */
2865 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2866
2867 /* Time stamp */
2868 __net_timestamp(skb);
2869
2870 skb_queue_tail(&hdev->rx_q, skb);
2871 queue_work(hdev->workqueue, &hdev->rx_work);
2872
2873 return 0;
2874 }
2875 EXPORT_SYMBOL(hci_recv_diag);
2876
2877 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2878 {
2879 va_list vargs;
2880
2881 va_start(vargs, fmt);
2882 kfree_const(hdev->hw_info);
2883 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2884 va_end(vargs);
2885 }
2886 EXPORT_SYMBOL(hci_set_hw_info);
2887
2888 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2889 {
2890 va_list vargs;
2891
2892 va_start(vargs, fmt);
2893 kfree_const(hdev->fw_info);
2894 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2895 va_end(vargs);
2896 }
2897 EXPORT_SYMBOL(hci_set_fw_info);
2898
2899 /* ---- Interface to upper protocols ---- */
2900
2901 int hci_register_cb(struct hci_cb *cb)
2902 {
2903 BT_DBG("%p name %s", cb, cb->name);
2904
2905 mutex_lock(&hci_cb_list_lock);
2906 list_add_tail(&cb->list, &hci_cb_list);
2907 mutex_unlock(&hci_cb_list_lock);
2908
2909 return 0;
2910 }
2911 EXPORT_SYMBOL(hci_register_cb);
2912
2913 int hci_unregister_cb(struct hci_cb *cb)
2914 {
2915 BT_DBG("%p name %s", cb, cb->name);
2916
2917 mutex_lock(&hci_cb_list_lock);
2918 list_del(&cb->list);
2919 mutex_unlock(&hci_cb_list_lock);
2920
2921 return 0;
2922 }
2923 EXPORT_SYMBOL(hci_unregister_cb);
2924
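/* Hands a single frame to the driver: timestamps it, mirrors a copy to the
 * monitor (and to sockets in promiscuous mode) and then calls hdev->send().
 * The skb is consumed on failure.
 */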
2925 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2926 {
2927 int err;
2928
2929 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
2930 skb->len);
2931
2932 /* Time stamp */
2933 __net_timestamp(skb);
2934
2935 /* Send copy to monitor */
2936 hci_send_to_monitor(hdev, skb);
2937
2938 if (atomic_read(&hdev->promisc)) {
2939 /* Send copy to the sockets */
2940 hci_send_to_sock(hdev, skb);
2941 }
2942
2943 /* Get rid of skb owner, prior to sending to the driver. */
2944 skb_orphan(skb);
2945
2946 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
2947 kfree_skb(skb);
2948 return -EINVAL;
2949 }
2950
2951 err = hdev->send(hdev, skb);
2952 if (err < 0) {
2953 bt_dev_err(hdev, "sending frame failed (%d)", err);
2954 kfree_skb(skb);
2955 return err;
2956 }
2957
2958 return 0;
2959 }
2960
2961 /* Send HCI command */
2962 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2963 const void *param)
2964 {
2965 struct sk_buff *skb;
2966
2967 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2968
2969 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2970 if (!skb) {
2971 bt_dev_err(hdev, "no memory for command");
2972 return -ENOMEM;
2973 }
2974
2975 /* Stand-alone HCI commands must be flagged as
2976 * single-command requests.
2977 */
2978 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
2979
2980 skb_queue_tail(&hdev->cmd_q, skb);
2981 queue_work(hdev->workqueue, &hdev->cmd_work);
2982
2983 return 0;
2984 }
2985
2986 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
2987 const void *param)
2988 {
2989 struct sk_buff *skb;
2990
2991 if (hci_opcode_ogf(opcode) != 0x3f) {
2992 /* A controller receiving a command shall respond with either
2993 * a Command Status Event or a Command Complete Event.
2994 * Therefore, all standard HCI commands must be sent via the
2995 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
2996 * Some vendors do not comply with this rule for vendor-specific
2997 * commands and do not return any event. We want to support
2998 * unresponded commands for such cases only.
2999 */
3000 bt_dev_err(hdev, "unresponded command not supported");
3001 return -EINVAL;
3002 }
3003
3004 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3005 if (!skb) {
3006 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3007 opcode);
3008 return -ENOMEM;
3009 }
3010
3011 hci_send_frame(hdev, skb);
3012
3013 return 0;
3014 }
3015 EXPORT_SYMBOL(__hci_cmd_send);
3016
3017 /* Get data from the previously sent command */
3018 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3019 {
3020 struct hci_command_hdr *hdr;
3021
3022 if (!hdev->sent_cmd)
3023 return NULL;
3024
3025 hdr = (void *) hdev->sent_cmd->data;
3026
3027 if (hdr->opcode != cpu_to_le16(opcode))
3028 return NULL;
3029
3030 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3031
3032 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3033 }
3034
3035 /* Send ACL data */
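/* Prepends the ACL header (packed handle/flags plus data length) to the skb. */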
3036 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3037 {
3038 struct hci_acl_hdr *hdr;
3039 int len = skb->len;
3040
3041 skb_push(skb, HCI_ACL_HDR_SIZE);
3042 skb_reset_transport_header(skb);
3043 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3044 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3045 hdr->dlen = cpu_to_le16(len);
3046 }
3047
3048 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3049 struct sk_buff *skb, __u16 flags)
3050 {
3051 struct hci_conn *conn = chan->conn;
3052 struct hci_dev *hdev = conn->hdev;
3053 struct sk_buff *list;
3054
3055 skb->len = skb_headlen(skb);
3056 skb->data_len = 0;
3057
3058 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3059
3060 switch (hdev->dev_type) {
3061 case HCI_PRIMARY:
3062 hci_add_acl_hdr(skb, conn->handle, flags);
3063 break;
3064 case HCI_AMP:
3065 hci_add_acl_hdr(skb, chan->handle, flags);
3066 break;
3067 default:
3068 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3069 return;
3070 }
3071
3072 list = skb_shinfo(skb)->frag_list;
3073 if (!list) {
3074 /* Non fragmented */
3075 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3076
3077 skb_queue_tail(queue, skb);
3078 } else {
3079 /* Fragmented */
3080 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3081
3082 skb_shinfo(skb)->frag_list = NULL;
3083
3084 		/* Queue all fragments atomically. We need to use spin_lock_bh
3085 		 * here because of 6LoWPAN links: for those, this function is
3086 		 * called from softirq context, and using a normal spin lock
3087 		 * could cause deadlocks.
3088 */
3089 spin_lock_bh(&queue->lock);
3090
3091 __skb_queue_tail(queue, skb);
3092
3093 flags &= ~ACL_START;
3094 flags |= ACL_CONT;
3095 do {
3096 skb = list; list = list->next;
3097
3098 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3099 hci_add_acl_hdr(skb, conn->handle, flags);
3100
3101 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3102
3103 __skb_queue_tail(queue, skb);
3104 } while (list);
3105
3106 spin_unlock_bh(&queue->lock);
3107 }
3108 }
3109
3110 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3111 {
3112 struct hci_dev *hdev = chan->conn->hdev;
3113
3114 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3115
3116 hci_queue_acl(chan, &chan->data_q, skb, flags);
3117
3118 queue_work(hdev->workqueue, &hdev->tx_work);
3119 }
3120
3121 /* Send SCO data */
3122 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3123 {
3124 struct hci_dev *hdev = conn->hdev;
3125 struct hci_sco_hdr hdr;
3126
3127 BT_DBG("%s len %d", hdev->name, skb->len);
3128
3129 hdr.handle = cpu_to_le16(conn->handle);
3130 hdr.dlen = skb->len;
3131
3132 skb_push(skb, HCI_SCO_HDR_SIZE);
3133 skb_reset_transport_header(skb);
3134 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3135
3136 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3137
3138 skb_queue_tail(&conn->data_q, skb);
3139 queue_work(hdev->workqueue, &hdev->tx_work);
3140 }
3141
3142 /* ---- HCI TX task (outgoing data) ---- */
3143
3144 /* HCI Connection scheduler */
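/* Picks the connection of the given type that has queued data and the fewest
 * packets in flight, and sets *quote to a fair share of the available
 * controller buffers for it.
 */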
3145 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3146 int *quote)
3147 {
3148 struct hci_conn_hash *h = &hdev->conn_hash;
3149 struct hci_conn *conn = NULL, *c;
3150 unsigned int num = 0, min = ~0;
3151
3152 	/* We don't have to lock the device here. Connections are always
3153 	 * added and removed with the TX task disabled. */
3154
3155 rcu_read_lock();
3156
3157 list_for_each_entry_rcu(c, &h->list, list) {
3158 if (c->type != type || skb_queue_empty(&c->data_q))
3159 continue;
3160
3161 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3162 continue;
3163
3164 num++;
3165
3166 if (c->sent < min) {
3167 min = c->sent;
3168 conn = c;
3169 }
3170
3171 if (hci_conn_num(hdev, type) == num)
3172 break;
3173 }
3174
3175 rcu_read_unlock();
3176
3177 if (conn) {
3178 int cnt, q;
3179
3180 switch (conn->type) {
3181 case ACL_LINK:
3182 cnt = hdev->acl_cnt;
3183 break;
3184 case SCO_LINK:
3185 case ESCO_LINK:
3186 cnt = hdev->sco_cnt;
3187 break;
3188 case LE_LINK:
3189 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3190 break;
3191 default:
3192 cnt = 0;
3193 bt_dev_err(hdev, "unknown link type %d", conn->type);
3194 }
3195
3196 q = cnt / num;
3197 *quote = q ? q : 1;
3198 } else
3199 *quote = 0;
3200
3201 BT_DBG("conn %p quote %d", conn, *quote);
3202 return conn;
3203 }
3204
3205 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3206 {
3207 struct hci_conn_hash *h = &hdev->conn_hash;
3208 struct hci_conn *c;
3209
3210 bt_dev_err(hdev, "link tx timeout");
3211
3212 rcu_read_lock();
3213
3214 /* Kill stalled connections */
3215 list_for_each_entry_rcu(c, &h->list, list) {
3216 if (c->type == type && c->sent) {
3217 bt_dev_err(hdev, "killing stalled connection %pMR",
3218 &c->dst);
3219 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3220 }
3221 }
3222
3223 rcu_read_unlock();
3224 }
3225
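/* Selects the next channel to service: among connections of the given type,
 * prefer the highest priority queued skb and, within that priority, the
 * connection with the fewest packets in flight. *quote is set to a fair
 * share of the available controller buffers.
 */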
3226 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3227 int *quote)
3228 {
3229 struct hci_conn_hash *h = &hdev->conn_hash;
3230 struct hci_chan *chan = NULL;
3231 unsigned int num = 0, min = ~0, cur_prio = 0;
3232 struct hci_conn *conn;
3233 int cnt, q, conn_num = 0;
3234
3235 BT_DBG("%s", hdev->name);
3236
3237 rcu_read_lock();
3238
3239 list_for_each_entry_rcu(conn, &h->list, list) {
3240 struct hci_chan *tmp;
3241
3242 if (conn->type != type)
3243 continue;
3244
3245 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3246 continue;
3247
3248 conn_num++;
3249
3250 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3251 struct sk_buff *skb;
3252
3253 if (skb_queue_empty(&tmp->data_q))
3254 continue;
3255
3256 skb = skb_peek(&tmp->data_q);
3257 if (skb->priority < cur_prio)
3258 continue;
3259
3260 if (skb->priority > cur_prio) {
3261 num = 0;
3262 min = ~0;
3263 cur_prio = skb->priority;
3264 }
3265
3266 num++;
3267
3268 if (conn->sent < min) {
3269 min = conn->sent;
3270 chan = tmp;
3271 }
3272 }
3273
3274 if (hci_conn_num(hdev, type) == conn_num)
3275 break;
3276 }
3277
3278 rcu_read_unlock();
3279
3280 if (!chan)
3281 return NULL;
3282
3283 switch (chan->conn->type) {
3284 case ACL_LINK:
3285 cnt = hdev->acl_cnt;
3286 break;
3287 case AMP_LINK:
3288 cnt = hdev->block_cnt;
3289 break;
3290 case SCO_LINK:
3291 case ESCO_LINK:
3292 cnt = hdev->sco_cnt;
3293 break;
3294 case LE_LINK:
3295 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3296 break;
3297 default:
3298 cnt = 0;
3299 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3300 }
3301
3302 q = cnt / num;
3303 *quote = q ? q : 1;
3304 BT_DBG("chan %p quote %d", chan, *quote);
3305 return chan;
3306 }
3307
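/* Promotes the head skb of channels that sent nothing in the last scheduling
 * round to HCI_PRIO_MAX - 1 so low-priority traffic is not starved forever.
 */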
3308 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3309 {
3310 struct hci_conn_hash *h = &hdev->conn_hash;
3311 struct hci_conn *conn;
3312 int num = 0;
3313
3314 BT_DBG("%s", hdev->name);
3315
3316 rcu_read_lock();
3317
3318 list_for_each_entry_rcu(conn, &h->list, list) {
3319 struct hci_chan *chan;
3320
3321 if (conn->type != type)
3322 continue;
3323
3324 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3325 continue;
3326
3327 num++;
3328
3329 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3330 struct sk_buff *skb;
3331
3332 if (chan->sent) {
3333 chan->sent = 0;
3334 continue;
3335 }
3336
3337 if (skb_queue_empty(&chan->data_q))
3338 continue;
3339
3340 skb = skb_peek(&chan->data_q);
3341 if (skb->priority >= HCI_PRIO_MAX - 1)
3342 continue;
3343
3344 skb->priority = HCI_PRIO_MAX - 1;
3345
3346 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3347 skb->priority);
3348 }
3349
3350 if (hci_conn_num(hdev, type) == num)
3351 break;
3352 }
3353
3354 rcu_read_unlock();
3355
3356 }
3357
3358 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3359 {
3360 /* Calculate count of blocks used by this packet */
3361 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3362 }
3363
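/* Detects an ACL transmit stall: if no controller buffers are free and
 * nothing has been sent for HCI_ACL_TX_TIMEOUT, stalled connections are
 * killed via hci_link_tx_to(). Skipped for unconfigured controllers.
 */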
3364 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3365 {
3366 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3367 /* ACL tx timeout must be longer than maximum
3368 * link supervision timeout (40.9 seconds) */
3369 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3370 HCI_ACL_TX_TIMEOUT))
3371 hci_link_tx_to(hdev, ACL_LINK);
3372 }
3373 }
3374
3375 /* Schedule SCO */
3376 static void hci_sched_sco(struct hci_dev *hdev)
3377 {
3378 struct hci_conn *conn;
3379 struct sk_buff *skb;
3380 int quote;
3381
3382 BT_DBG("%s", hdev->name);
3383
3384 if (!hci_conn_num(hdev, SCO_LINK))
3385 return;
3386
3387 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3388 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3389 BT_DBG("skb %p len %d", skb, skb->len);
3390 hci_send_frame(hdev, skb);
3391
3392 conn->sent++;
3393 if (conn->sent == ~0)
3394 conn->sent = 0;
3395 }
3396 }
3397 }
3398
3399 static void hci_sched_esco(struct hci_dev *hdev)
3400 {
3401 struct hci_conn *conn;
3402 struct sk_buff *skb;
3403 int quote;
3404
3405 BT_DBG("%s", hdev->name);
3406
3407 if (!hci_conn_num(hdev, ESCO_LINK))
3408 return;
3409
3410 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3411 						     &quote))) {
3412 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3413 BT_DBG("skb %p len %d", skb, skb->len);
3414 hci_send_frame(hdev, skb);
3415
3416 conn->sent++;
3417 if (conn->sent == ~0)
3418 conn->sent = 0;
3419 }
3420 }
3421 }
3422
3423 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3424 {
3425 unsigned int cnt = hdev->acl_cnt;
3426 struct hci_chan *chan;
3427 struct sk_buff *skb;
3428 int quote;
3429
3430 __check_timeout(hdev, cnt);
3431
3432 while (hdev->acl_cnt &&
3433 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3434 u32 priority = (skb_peek(&chan->data_q))->priority;
3435 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3436 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3437 skb->len, skb->priority);
3438
3439 /* Stop if priority has changed */
3440 if (skb->priority < priority)
3441 break;
3442
3443 skb = skb_dequeue(&chan->data_q);
3444
3445 hci_conn_enter_active_mode(chan->conn,
3446 bt_cb(skb)->force_active);
3447
3448 hci_send_frame(hdev, skb);
3449 hdev->acl_last_tx = jiffies;
3450
3451 hdev->acl_cnt--;
3452 chan->sent++;
3453 chan->conn->sent++;
3454
3455 /* Send pending SCO packets right away */
3456 hci_sched_sco(hdev);
3457 hci_sched_esco(hdev);
3458 }
3459 }
3460
3461 if (cnt != hdev->acl_cnt)
3462 hci_prio_recalculate(hdev, ACL_LINK);
3463 }
3464
3465 static void hci_sched_acl_blk(struct hci_dev *hdev)
3466 {
3467 unsigned int cnt = hdev->block_cnt;
3468 struct hci_chan *chan;
3469 struct sk_buff *skb;
3470 int quote;
3471 u8 type;
3472
3473 __check_timeout(hdev, cnt);
3474
3475 BT_DBG("%s", hdev->name);
3476
3477 if (hdev->dev_type == HCI_AMP)
3478 type = AMP_LINK;
3479 else
3480 type = ACL_LINK;
3481
3482 while (hdev->block_cnt > 0 &&
3483 	       (chan = hci_chan_sent(hdev, type, &quote))) {
3484 u32 priority = (skb_peek(&chan->data_q))->priority;
3485 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3486 int blocks;
3487
3488 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3489 skb->len, skb->priority);
3490
3491 /* Stop if priority has changed */
3492 if (skb->priority < priority)
3493 break;
3494
3495 skb = skb_dequeue(&chan->data_q);
3496
3497 blocks = __get_blocks(hdev, skb);
3498 if (blocks > hdev->block_cnt)
3499 return;
3500
3501 hci_conn_enter_active_mode(chan->conn,
3502 bt_cb(skb)->force_active);
3503
3504 hci_send_frame(hdev, skb);
3505 hdev->acl_last_tx = jiffies;
3506
3507 hdev->block_cnt -= blocks;
3508 quote -= blocks;
3509
3510 chan->sent += blocks;
3511 chan->conn->sent += blocks;
3512 }
3513 }
3514
3515 if (cnt != hdev->block_cnt)
3516 hci_prio_recalculate(hdev, type);
3517 }
3518
3519 static void hci_sched_acl(struct hci_dev *hdev)
3520 {
3521 BT_DBG("%s", hdev->name);
3522
3523 /* No ACL link over BR/EDR controller */
3524 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3525 return;
3526
3527 /* No AMP link over AMP controller */
3528 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3529 return;
3530
3531 switch (hdev->flow_ctl_mode) {
3532 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3533 hci_sched_acl_pkt(hdev);
3534 break;
3535
3536 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3537 hci_sched_acl_blk(hdev);
3538 break;
3539 }
3540 }
3541
3542 static void hci_sched_le(struct hci_dev *hdev)
3543 {
3544 struct hci_chan *chan;
3545 struct sk_buff *skb;
3546 int quote, cnt, tmp;
3547
3548 BT_DBG("%s", hdev->name);
3549
3550 if (!hci_conn_num(hdev, LE_LINK))
3551 return;
3552
3553 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3554
3555 __check_timeout(hdev, cnt);
3556
3557 tmp = cnt;
3558 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3559 u32 priority = (skb_peek(&chan->data_q))->priority;
3560 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3561 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3562 skb->len, skb->priority);
3563
3564 /* Stop if priority has changed */
3565 if (skb->priority < priority)
3566 break;
3567
3568 skb = skb_dequeue(&chan->data_q);
3569
3570 hci_send_frame(hdev, skb);
3571 hdev->le_last_tx = jiffies;
3572
3573 cnt--;
3574 chan->sent++;
3575 chan->conn->sent++;
3576
3577 /* Send pending SCO packets right away */
3578 hci_sched_sco(hdev);
3579 hci_sched_esco(hdev);
3580 }
3581 }
3582
3583 if (hdev->le_pkts)
3584 hdev->le_cnt = cnt;
3585 else
3586 hdev->acl_cnt = cnt;
3587
3588 if (cnt != tmp)
3589 hci_prio_recalculate(hdev, LE_LINK);
3590 }
3591
3592 static void hci_tx_work(struct work_struct *work)
3593 {
3594 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3595 struct sk_buff *skb;
3596
3597 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3598 hdev->sco_cnt, hdev->le_cnt);
3599
3600 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3601 /* Schedule queues and send stuff to HCI driver */
3602 hci_sched_sco(hdev);
3603 hci_sched_esco(hdev);
3604 hci_sched_acl(hdev);
3605 hci_sched_le(hdev);
3606 }
3607
3608 /* Send next queued raw (unknown type) packet */
3609 while ((skb = skb_dequeue(&hdev->raw_q)))
3610 hci_send_frame(hdev, skb);
3611 }
3612
3613 /* ----- HCI RX task (incoming data processing) ----- */
3614
3615 /* ACL data packet */
3616 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3617 {
3618 struct hci_acl_hdr *hdr = (void *) skb->data;
3619 struct hci_conn *conn;
3620 __u16 handle, flags;
3621
3622 skb_pull(skb, HCI_ACL_HDR_SIZE);
3623
3624 handle = __le16_to_cpu(hdr->handle);
3625 flags = hci_flags(handle);
3626 handle = hci_handle(handle);
3627
3628 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3629 handle, flags);
3630
3631 hdev->stat.acl_rx++;
3632
3633 hci_dev_lock(hdev);
3634 conn = hci_conn_hash_lookup_handle(hdev, handle);
3635 hci_dev_unlock(hdev);
3636
3637 if (conn) {
3638 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3639
3640 /* Send to upper protocol */
3641 l2cap_recv_acldata(conn, skb, flags);
3642 return;
3643 } else {
3644 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3645 handle);
3646 }
3647
3648 kfree_skb(skb);
3649 }
3650
3651 /* SCO data packet */
3652 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3653 {
3654 struct hci_sco_hdr *hdr = (void *) skb->data;
3655 struct hci_conn *conn;
3656 __u16 handle, flags;
3657
3658 skb_pull(skb, HCI_SCO_HDR_SIZE);
3659
3660 handle = __le16_to_cpu(hdr->handle);
3661 flags = hci_flags(handle);
3662 handle = hci_handle(handle);
3663
3664 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3665 handle, flags);
3666
3667 hdev->stat.sco_rx++;
3668
3669 hci_dev_lock(hdev);
3670 conn = hci_conn_hash_lookup_handle(hdev, handle);
3671 hci_dev_unlock(hdev);
3672
3673 if (conn) {
3674 /* Send to upper protocol */
3675 bt_cb(skb)->sco.pkt_status = flags & 0x03;
3676 sco_recv_scodata(conn, skb);
3677 return;
3678 } else {
3679 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3680 handle);
3681 }
3682
3683 kfree_skb(skb);
3684 }
3685
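/* A request is complete when the command queue is empty or the next queued
 * command starts a new request (HCI_REQ_START is set on it).
 */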
3686 static bool hci_req_is_complete(struct hci_dev *hdev)
3687 {
3688 struct sk_buff *skb;
3689
3690 skb = skb_peek(&hdev->cmd_q);
3691 if (!skb)
3692 return true;
3693
3694 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3695 }
3696
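/* Re-queues a clone of the last sent command (unless it was HCI_OP_RESET) so
 * that the command work sends it again.
 */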
3697 static void hci_resend_last(struct hci_dev *hdev)
3698 {
3699 struct hci_command_hdr *sent;
3700 struct sk_buff *skb;
3701 u16 opcode;
3702
3703 if (!hdev->sent_cmd)
3704 return;
3705
3706 sent = (void *) hdev->sent_cmd->data;
3707 opcode = __le16_to_cpu(sent->opcode);
3708 if (opcode == HCI_OP_RESET)
3709 return;
3710
3711 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3712 if (!skb)
3713 return;
3714
3715 skb_queue_head(&hdev->cmd_q, skb);
3716 queue_work(hdev->workqueue, &hdev->cmd_work);
3717 }
3718
3719 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3720 hci_req_complete_t *req_complete,
3721 hci_req_complete_skb_t *req_complete_skb)
3722 {
3723 struct sk_buff *skb;
3724 unsigned long flags;
3725
3726 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3727
3728 /* If the completed command doesn't match the last one that was
3729 * sent we need to do special handling of it.
3730 */
3731 if (!hci_sent_cmd_data(hdev, opcode)) {
3732 /* Some CSR based controllers generate a spontaneous
3733 * reset complete event during init and any pending
3734 * command will never be completed. In such a case we
3735 * need to resend whatever was the last sent
3736 * command.
3737 */
3738 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3739 hci_resend_last(hdev);
3740
3741 return;
3742 }
3743
3744 /* If we reach this point this event matches the last command sent */
3745 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3746
3747 /* If the command succeeded and there's still more commands in
3748 * this request the request is not yet complete.
3749 */
3750 if (!status && !hci_req_is_complete(hdev))
3751 return;
3752
3753 /* If this was the last command in a request the complete
3754 * callback would be found in hdev->sent_cmd instead of the
3755 * command queue (hdev->cmd_q).
3756 */
3757 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
3758 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
3759 return;
3760 }
3761
3762 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
3763 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
3764 return;
3765 }
3766
3767 /* Remove all pending commands belonging to this request */
3768 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3769 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3770 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3771 __skb_queue_head(&hdev->cmd_q, skb);
3772 break;
3773 }
3774
3775 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3776 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3777 else
3778 *req_complete = bt_cb(skb)->hci.req_complete;
3779 kfree_skb(skb);
3780 }
3781 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3782 }
3783
3784 static void hci_rx_work(struct work_struct *work)
3785 {
3786 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3787 struct sk_buff *skb;
3788
3789 BT_DBG("%s", hdev->name);
3790
3791 while ((skb = skb_dequeue(&hdev->rx_q))) {
3792 /* Send copy to monitor */
3793 hci_send_to_monitor(hdev, skb);
3794
3795 if (atomic_read(&hdev->promisc)) {
3796 /* Send copy to the sockets */
3797 hci_send_to_sock(hdev, skb);
3798 }
3799
3800 		/* If the device has been opened in HCI_USER_CHANNEL,
3801 		 * userspace has exclusive access to the device.
3802 		 * While the device is in HCI_INIT, we still need to pass
3803 		 * the data packets on to the driver in order
3804 		 * to complete its setup().
3805 */
3806 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
3807 !test_bit(HCI_INIT, &hdev->flags)) {
3808 kfree_skb(skb);
3809 continue;
3810 }
3811
3812 if (test_bit(HCI_INIT, &hdev->flags)) {
3813 			/* Don't process data packets in this state. */
3814 switch (hci_skb_pkt_type(skb)) {
3815 case HCI_ACLDATA_PKT:
3816 case HCI_SCODATA_PKT:
3817 case HCI_ISODATA_PKT:
3818 kfree_skb(skb);
3819 continue;
3820 }
3821 }
3822
3823 /* Process frame */
3824 switch (hci_skb_pkt_type(skb)) {
3825 case HCI_EVENT_PKT:
3826 BT_DBG("%s Event packet", hdev->name);
3827 hci_event_packet(hdev, skb);
3828 break;
3829
3830 case HCI_ACLDATA_PKT:
3831 BT_DBG("%s ACL data packet", hdev->name);
3832 hci_acldata_packet(hdev, skb);
3833 break;
3834
3835 case HCI_SCODATA_PKT:
3836 BT_DBG("%s SCO data packet", hdev->name);
3837 hci_scodata_packet(hdev, skb);
3838 break;
3839
3840 default:
3841 kfree_skb(skb);
3842 break;
3843 }
3844 }
3845 }
3846
3847 static void hci_cmd_work(struct work_struct *work)
3848 {
3849 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3850 struct sk_buff *skb;
3851
3852 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3853 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3854
3855 /* Send queued commands */
3856 if (atomic_read(&hdev->cmd_cnt)) {
3857 skb = skb_dequeue(&hdev->cmd_q);
3858 if (!skb)
3859 return;
3860
3861 kfree_skb(hdev->sent_cmd);
3862
3863 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
3864 if (hdev->sent_cmd) {
3865 int res;
3866 if (hci_req_status_pend(hdev))
3867 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
3868 atomic_dec(&hdev->cmd_cnt);
3869
3870 res = hci_send_frame(hdev, skb);
3871 if (res < 0)
3872 __hci_cmd_sync_cancel(hdev, -res);
3873
3874 if (test_bit(HCI_RESET, &hdev->flags) ||
3875 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3876 cancel_delayed_work(&hdev->cmd_timer);
3877 else
3878 schedule_delayed_work(&hdev->cmd_timer,
3879 HCI_CMD_TIMEOUT);
3880 } else {
3881 skb_queue_head(&hdev->cmd_q, skb);
3882 queue_work(hdev->workqueue, &hdev->cmd_work);
3883 }
3884 }
3885 }
3886