1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
45 #include "smp.h"
46 #include "leds.h"
47 #include "msft.h"
48 #include "aosp.h"
49 #include "hci_codec.h"
50
51 static void hci_rx_work(struct work_struct *work);
52 static void hci_cmd_work(struct work_struct *work);
53 static void hci_tx_work(struct work_struct *work);
54
55 /* HCI device list */
56 LIST_HEAD(hci_dev_list);
57 DEFINE_RWLOCK(hci_dev_list_lock);
58
59 /* HCI callback list */
60 LIST_HEAD(hci_cb_list);
61 DEFINE_MUTEX(hci_cb_list_lock);
62
63 /* HCI ID Numbering */
64 static DEFINE_IDA(hci_index_ida);
65
66 static int hci_scan_req(struct hci_request *req, unsigned long opt)
67 {
68 __u8 scan = opt;
69
70 BT_DBG("%s %x", req->hdev->name, scan);
71
72 /* Inquiry and Page scans */
73 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
74 return 0;
75 }
76
77 static int hci_auth_req(struct hci_request *req, unsigned long opt)
78 {
79 __u8 auth = opt;
80
81 BT_DBG("%s %x", req->hdev->name, auth);
82
83 /* Authentication */
84 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
85 return 0;
86 }
87
88 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
89 {
90 __u8 encrypt = opt;
91
92 BT_DBG("%s %x", req->hdev->name, encrypt);
93
94 /* Encryption */
95 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
96 return 0;
97 }
98
99 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
100 {
101 __le16 policy = cpu_to_le16(opt);
102
103 BT_DBG("%s %x", req->hdev->name, policy);
104
105 /* Default link policy */
106 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
107 return 0;
108 }
109
110 /* Get HCI device by index.
111 * Device is held on return. */
112 struct hci_dev *hci_dev_get(int index)
113 {
114 struct hci_dev *hdev = NULL, *d;
115
116 BT_DBG("%d", index);
117
118 if (index < 0)
119 return NULL;
120
121 read_lock(&hci_dev_list_lock);
122 list_for_each_entry(d, &hci_dev_list, list) {
123 if (d->id == index) {
124 hdev = hci_dev_hold(d);
125 break;
126 }
127 }
128 read_unlock(&hci_dev_list_lock);
129 return hdev;
130 }
131
132 /* ---- Inquiry support ---- */
133
134 bool hci_discovery_active(struct hci_dev *hdev)
135 {
136 struct discovery_state *discov = &hdev->discovery;
137
138 switch (discov->state) {
139 case DISCOVERY_FINDING:
140 case DISCOVERY_RESOLVING:
141 return true;
142
143 default:
144 return false;
145 }
146 }
147
148 void hci_discovery_set_state(struct hci_dev *hdev, int state)
149 {
150 int old_state = hdev->discovery.state;
151
152 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
153
154 if (old_state == state)
155 return;
156
157 hdev->discovery.state = state;
158
159 switch (state) {
160 case DISCOVERY_STOPPED:
161 hci_update_passive_scan(hdev);
162
163 if (old_state != DISCOVERY_STARTING)
164 mgmt_discovering(hdev, 0);
165 break;
166 case DISCOVERY_STARTING:
167 break;
168 case DISCOVERY_FINDING:
169 mgmt_discovering(hdev, 1);
170 break;
171 case DISCOVERY_RESOLVING:
172 break;
173 case DISCOVERY_STOPPING:
174 break;
175 }
176 }
177
178 void hci_inquiry_cache_flush(struct hci_dev *hdev)
179 {
180 struct discovery_state *cache = &hdev->discovery;
181 struct inquiry_entry *p, *n;
182
183 list_for_each_entry_safe(p, n, &cache->all, all) {
184 list_del(&p->all);
185 kfree(p);
186 }
187
188 INIT_LIST_HEAD(&cache->unknown);
189 INIT_LIST_HEAD(&cache->resolve);
190 }
191
192 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
193 bdaddr_t *bdaddr)
194 {
195 struct discovery_state *cache = &hdev->discovery;
196 struct inquiry_entry *e;
197
198 BT_DBG("cache %p, %pMR", cache, bdaddr);
199
200 list_for_each_entry(e, &cache->all, all) {
201 if (!bacmp(&e->data.bdaddr, bdaddr))
202 return e;
203 }
204
205 return NULL;
206 }
207
208 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
209 bdaddr_t *bdaddr)
210 {
211 struct discovery_state *cache = &hdev->discovery;
212 struct inquiry_entry *e;
213
214 BT_DBG("cache %p, %pMR", cache, bdaddr);
215
216 list_for_each_entry(e, &cache->unknown, list) {
217 if (!bacmp(&e->data.bdaddr, bdaddr))
218 return e;
219 }
220
221 return NULL;
222 }
223
224 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
225 bdaddr_t *bdaddr,
226 int state)
227 {
228 struct discovery_state *cache = &hdev->discovery;
229 struct inquiry_entry *e;
230
231 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
232
233 list_for_each_entry(e, &cache->resolve, list) {
234 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
235 return e;
236 if (!bacmp(&e->data.bdaddr, bdaddr))
237 return e;
238 }
239
240 return NULL;
241 }
242
243 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
244 struct inquiry_entry *ie)
245 {
246 struct discovery_state *cache = &hdev->discovery;
247 struct list_head *pos = &cache->resolve;
248 struct inquiry_entry *p;
249
250 list_del(&ie->list);
251
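/* Keep the resolve list ordered by signal strength: entries with a
 * smaller |RSSI| (i.e. a stronger signal) sort first, so their names
 * are resolved before those of weaker devices.
 */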
252 list_for_each_entry(p, &cache->resolve, list) {
253 if (p->name_state != NAME_PENDING &&
254 abs(p->data.rssi) >= abs(ie->data.rssi))
255 break;
256 pos = &p->list;
257 }
258
259 list_add(&ie->list, pos);
260 }
261
262 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
263 bool name_known)
264 {
265 struct discovery_state *cache = &hdev->discovery;
266 struct inquiry_entry *ie;
267 u32 flags = 0;
268
269 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
270
271 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
272
273 if (!data->ssp_mode)
274 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
275
276 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
277 if (ie) {
278 if (!ie->data.ssp_mode)
279 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
280
281 if (ie->name_state == NAME_NEEDED &&
282 data->rssi != ie->data.rssi) {
283 ie->data.rssi = data->rssi;
284 hci_inquiry_cache_update_resolve(hdev, ie);
285 }
286
287 goto update;
288 }
289
290 /* Entry not in the cache. Add new one. */
291 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
292 if (!ie) {
293 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
294 goto done;
295 }
296
297 list_add(&ie->all, &cache->all);
298
299 if (name_known) {
300 ie->name_state = NAME_KNOWN;
301 } else {
302 ie->name_state = NAME_NOT_KNOWN;
303 list_add(&ie->list, &cache->unknown);
304 }
305
306 update:
307 if (name_known && ie->name_state != NAME_KNOWN &&
308 ie->name_state != NAME_PENDING) {
309 ie->name_state = NAME_KNOWN;
310 list_del(&ie->list);
311 }
312
313 memcpy(&ie->data, data, sizeof(*data));
314 ie->timestamp = jiffies;
315 cache->timestamp = jiffies;
316
317 if (ie->name_state == NAME_NOT_KNOWN)
318 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
319
320 done:
321 return flags;
322 }
323
324 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
325 {
326 struct discovery_state *cache = &hdev->discovery;
327 struct inquiry_info *info = (struct inquiry_info *) buf;
328 struct inquiry_entry *e;
329 int copied = 0;
330
331 list_for_each_entry(e, &cache->all, all) {
332 struct inquiry_data *data = &e->data;
333
334 if (copied >= num)
335 break;
336
337 bacpy(&info->bdaddr, &data->bdaddr);
338 info->pscan_rep_mode = data->pscan_rep_mode;
339 info->pscan_period_mode = data->pscan_period_mode;
340 info->pscan_mode = data->pscan_mode;
341 memcpy(info->dev_class, data->dev_class, 3);
342 info->clock_offset = data->clock_offset;
343
344 info++;
345 copied++;
346 }
347
348 BT_DBG("cache %p, copied %d", cache, copied);
349 return copied;
350 }
351
352 static int hci_inq_req(struct hci_request *req, unsigned long opt)
353 {
354 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
355 struct hci_dev *hdev = req->hdev;
356 struct hci_cp_inquiry cp;
357
358 BT_DBG("%s", hdev->name);
359
360 if (test_bit(HCI_INQUIRY, &hdev->flags))
361 return 0;
362
363 /* Start Inquiry */
364 memcpy(&cp.lap, &ir->lap, 3);
365 cp.length = ir->length;
366 cp.num_rsp = ir->num_rsp;
367 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
368
369 return 0;
370 }
371
372 int hci_inquiry(void __user *arg)
373 {
374 __u8 __user *ptr = arg;
375 struct hci_inquiry_req ir;
376 struct hci_dev *hdev;
377 int err = 0, do_inquiry = 0, max_rsp;
378 long timeo;
379 __u8 *buf;
380
381 if (copy_from_user(&ir, ptr, sizeof(ir)))
382 return -EFAULT;
383
384 hdev = hci_dev_get(ir.dev_id);
385 if (!hdev)
386 return -ENODEV;
387
388 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
389 err = -EBUSY;
390 goto done;
391 }
392
393 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
394 err = -EOPNOTSUPP;
395 goto done;
396 }
397
398 if (hdev->dev_type != HCI_PRIMARY) {
399 err = -EOPNOTSUPP;
400 goto done;
401 }
402
403 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
404 err = -EOPNOTSUPP;
405 goto done;
406 }
407
408 /* Restrict maximum inquiry length to 60 seconds */
409 if (ir.length > 60) {
410 err = -EINVAL;
411 goto done;
412 }
413
414 hci_dev_lock(hdev);
415 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
416 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
417 hci_inquiry_cache_flush(hdev);
418 do_inquiry = 1;
419 }
420 hci_dev_unlock(hdev);
421
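/* ir.length is expressed in 1.28 second units; allow roughly two
 * seconds per unit for the whole inquiry request to complete.
 */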
422 timeo = ir.length * msecs_to_jiffies(2000);
423
424 if (do_inquiry) {
425 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
426 timeo, NULL);
427 if (err < 0)
428 goto done;
429
430 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
431 * cleared). If it is interrupted by a signal, return -EINTR.
432 */
433 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
434 TASK_INTERRUPTIBLE)) {
435 err = -EINTR;
436 goto done;
437 }
438 }
439
440 /* For an unlimited number of responses we will use a buffer with
441 * 255 entries
442 */
443 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
444
445 /* cache_dump can't sleep. Therefore we allocate a temporary buffer and
446 * then copy it to user space.
447 */
448 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
449 if (!buf) {
450 err = -ENOMEM;
451 goto done;
452 }
453
454 hci_dev_lock(hdev);
455 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
456 hci_dev_unlock(hdev);
457
458 BT_DBG("num_rsp %d", ir.num_rsp);
459
460 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
461 ptr += sizeof(ir);
462 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
463 ir.num_rsp))
464 err = -EFAULT;
465 } else
466 err = -EFAULT;
467
468 kfree(buf);
469
470 done:
471 hci_dev_put(hdev);
472 return err;
473 }
474
475 static int hci_dev_do_open(struct hci_dev *hdev)
476 {
477 int ret = 0;
478
479 BT_DBG("%s %p", hdev->name, hdev);
480
481 hci_req_sync_lock(hdev);
482
483 ret = hci_dev_open_sync(hdev);
484
485 hci_req_sync_unlock(hdev);
486 return ret;
487 }
488
489 /* ---- HCI ioctl helpers ---- */
490
491 int hci_dev_open(__u16 dev)
492 {
493 struct hci_dev *hdev;
494 int err;
495
496 hdev = hci_dev_get(dev);
497 if (!hdev)
498 return -ENODEV;
499
500 /* Devices that are marked as unconfigured can only be powered
501 * up as user channel. Trying to bring them up as normal devices
502 * will result in a failure. Only user channel operation is
503 * possible.
504 *
505 * When this function is called for a user channel, the flag
506 * HCI_USER_CHANNEL will be set first before attempting to
507 * open the device.
508 */
509 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
510 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
511 err = -EOPNOTSUPP;
512 goto done;
513 }
514
515 /* We need to ensure that no other power on/off work is pending
516 * before proceeding to call hci_dev_do_open. This is
517 * particularly important if the setup procedure has not yet
518 * completed.
519 */
520 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
521 cancel_delayed_work(&hdev->power_off);
522
523 /* After this call it is guaranteed that the setup procedure
524 * has finished. This means that error conditions like RFKILL
525 * or no valid public or static random address apply.
526 */
527 flush_workqueue(hdev->req_workqueue);
528
529 /* For controllers not using the management interface and that
530 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
531 * so that pairing works for them. Once the management interface
532 * is in use this bit will be cleared again and userspace has
533 * to explicitly enable it.
534 */
535 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
536 !hci_dev_test_flag(hdev, HCI_MGMT))
537 hci_dev_set_flag(hdev, HCI_BONDABLE);
538
539 err = hci_dev_do_open(hdev);
540
541 done:
542 hci_dev_put(hdev);
543 return err;
544 }
545
546 int hci_dev_do_close(struct hci_dev *hdev)
547 {
548 int err;
549
550 BT_DBG("%s %p", hdev->name, hdev);
551
552 hci_req_sync_lock(hdev);
553
554 err = hci_dev_close_sync(hdev);
555
556 hci_req_sync_unlock(hdev);
557
558 return err;
559 }
560
561 int hci_dev_close(__u16 dev)
562 {
563 struct hci_dev *hdev;
564 int err;
565
566 hdev = hci_dev_get(dev);
567 if (!hdev)
568 return -ENODEV;
569
570 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
571 err = -EBUSY;
572 goto done;
573 }
574
575 cancel_work_sync(&hdev->power_on);
576 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
577 cancel_delayed_work(&hdev->power_off);
578
579 err = hci_dev_do_close(hdev);
580
581 done:
582 hci_dev_put(hdev);
583 return err;
584 }
585
586 static int hci_dev_do_reset(struct hci_dev *hdev)
587 {
588 int ret;
589
590 BT_DBG("%s %p", hdev->name, hdev);
591
592 hci_req_sync_lock(hdev);
593
594 /* Drop queues */
595 skb_queue_purge(&hdev->rx_q);
596 skb_queue_purge(&hdev->cmd_q);
597
598 /* Cancel these to avoid queueing non-chained pending work */
599 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
600 /* Wait for
601 *
602 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
603 * queue_delayed_work(&hdev->{cmd,ncmd}_timer)
604 *
605 * inside RCU section to see the flag or complete scheduling.
606 */
607 synchronize_rcu();
608 /* Explicitly cancel works in case scheduled after setting the flag. */
609 cancel_delayed_work(&hdev->cmd_timer);
610 cancel_delayed_work(&hdev->ncmd_timer);
611
612 /* Avoid potential lockdep warnings from the *_flush() calls by
613 * ensuring the workqueue is empty up front.
614 */
615 drain_workqueue(hdev->workqueue);
616
617 hci_dev_lock(hdev);
618 hci_inquiry_cache_flush(hdev);
619 hci_conn_hash_flush(hdev);
620 hci_dev_unlock(hdev);
621
622 if (hdev->flush)
623 hdev->flush(hdev);
624
625 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
626
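/* Allow one HCI command to go out again and clear the per-link-type
 * flow control counters before issuing the reset.
 */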
627 atomic_set(&hdev->cmd_cnt, 1);
628 hdev->acl_cnt = 0;
629 hdev->sco_cnt = 0;
630 hdev->le_cnt = 0;
631 hdev->iso_cnt = 0;
632
633 ret = hci_reset_sync(hdev);
634
635 hci_req_sync_unlock(hdev);
636 return ret;
637 }
638
639 int hci_dev_reset(__u16 dev)
640 {
641 struct hci_dev *hdev;
642 int err;
643
644 hdev = hci_dev_get(dev);
645 if (!hdev)
646 return -ENODEV;
647
648 if (!test_bit(HCI_UP, &hdev->flags)) {
649 err = -ENETDOWN;
650 goto done;
651 }
652
653 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
654 err = -EBUSY;
655 goto done;
656 }
657
658 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
659 err = -EOPNOTSUPP;
660 goto done;
661 }
662
663 err = hci_dev_do_reset(hdev);
664
665 done:
666 hci_dev_put(hdev);
667 return err;
668 }
669
670 int hci_dev_reset_stat(__u16 dev)
671 {
672 struct hci_dev *hdev;
673 int ret = 0;
674
675 hdev = hci_dev_get(dev);
676 if (!hdev)
677 return -ENODEV;
678
679 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
680 ret = -EBUSY;
681 goto done;
682 }
683
684 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
685 ret = -EOPNOTSUPP;
686 goto done;
687 }
688
689 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
690
691 done:
692 hci_dev_put(hdev);
693 return ret;
694 }
695
696 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
697 {
698 bool conn_changed, discov_changed;
699
700 BT_DBG("%s scan 0x%02x", hdev->name, scan);
701
702 if ((scan & SCAN_PAGE))
703 conn_changed = !hci_dev_test_and_set_flag(hdev,
704 HCI_CONNECTABLE);
705 else
706 conn_changed = hci_dev_test_and_clear_flag(hdev,
707 HCI_CONNECTABLE);
708
709 if ((scan & SCAN_INQUIRY)) {
710 discov_changed = !hci_dev_test_and_set_flag(hdev,
711 HCI_DISCOVERABLE);
712 } else {
713 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
714 discov_changed = hci_dev_test_and_clear_flag(hdev,
715 HCI_DISCOVERABLE);
716 }
717
718 if (!hci_dev_test_flag(hdev, HCI_MGMT))
719 return;
720
721 if (conn_changed || discov_changed) {
722 /* In case this was disabled through mgmt */
723 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
724
725 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
726 hci_update_adv_data(hdev, hdev->cur_adv_instance);
727
728 mgmt_new_settings(hdev);
729 }
730 }
731
732 int hci_dev_cmd(unsigned int cmd, void __user *arg)
733 {
734 struct hci_dev *hdev;
735 struct hci_dev_req dr;
736 int err = 0;
737
738 if (copy_from_user(&dr, arg, sizeof(dr)))
739 return -EFAULT;
740
741 hdev = hci_dev_get(dr.dev_id);
742 if (!hdev)
743 return -ENODEV;
744
745 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
746 err = -EBUSY;
747 goto done;
748 }
749
750 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
751 err = -EOPNOTSUPP;
752 goto done;
753 }
754
755 if (hdev->dev_type != HCI_PRIMARY) {
756 err = -EOPNOTSUPP;
757 goto done;
758 }
759
760 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
761 err = -EOPNOTSUPP;
762 goto done;
763 }
764
765 switch (cmd) {
766 case HCISETAUTH:
767 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
768 HCI_INIT_TIMEOUT, NULL);
769 break;
770
771 case HCISETENCRYPT:
772 if (!lmp_encrypt_capable(hdev)) {
773 err = -EOPNOTSUPP;
774 break;
775 }
776
777 if (!test_bit(HCI_AUTH, &hdev->flags)) {
778 /* Auth must be enabled first */
779 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
780 HCI_INIT_TIMEOUT, NULL);
781 if (err)
782 break;
783 }
784
785 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
786 HCI_INIT_TIMEOUT, NULL);
787 break;
788
789 case HCISETSCAN:
790 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
791 HCI_INIT_TIMEOUT, NULL);
792
793 /* Ensure that the connectable and discoverable states
794 * get correctly modified as this was a non-mgmt change.
795 */
796 if (!err)
797 hci_update_passive_scan_state(hdev, dr.dev_opt);
798 break;
799
800 case HCISETLINKPOL:
801 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
802 HCI_INIT_TIMEOUT, NULL);
803 break;
804
805 case HCISETLINKMODE:
806 hdev->link_mode = ((__u16) dr.dev_opt) &
807 (HCI_LM_MASTER | HCI_LM_ACCEPT);
808 break;
809
810 case HCISETPTYPE:
811 if (hdev->pkt_type == (__u16) dr.dev_opt)
812 break;
813
814 hdev->pkt_type = (__u16) dr.dev_opt;
815 mgmt_phy_configuration_changed(hdev, NULL);
816 break;
817
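/* For the MTU ioctls, dev_opt carries two packed __u16 values: the
 * first word (as laid out in memory) is the packet count and the
 * second word is the MTU.
 */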
818 case HCISETACLMTU:
819 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
820 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
821 break;
822
823 case HCISETSCOMTU:
824 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
825 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
826 break;
827
828 default:
829 err = -EINVAL;
830 break;
831 }
832
833 done:
834 hci_dev_put(hdev);
835 return err;
836 }
837
838 int hci_get_dev_list(void __user *arg)
839 {
840 struct hci_dev *hdev;
841 struct hci_dev_list_req *dl;
842 struct hci_dev_req *dr;
843 int n = 0, size, err;
844 __u16 dev_num;
845
846 if (get_user(dev_num, (__u16 __user *) arg))
847 return -EFAULT;
848
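/* Reject an empty request and cap dev_num so the buffer allocated
 * below stays at roughly two pages.
 */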
849 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
850 return -EINVAL;
851
852 size = sizeof(*dl) + dev_num * sizeof(*dr);
853
854 dl = kzalloc(size, GFP_KERNEL);
855 if (!dl)
856 return -ENOMEM;
857
858 dr = dl->dev_req;
859
860 read_lock(&hci_dev_list_lock);
861 list_for_each_entry(hdev, &hci_dev_list, list) {
862 unsigned long flags = hdev->flags;
863
864 /* When the auto-off is configured it means the transport
865 * is running, but in that case still indicate that the
866 * device is actually down.
867 */
868 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
869 flags &= ~BIT(HCI_UP);
870
871 (dr + n)->dev_id = hdev->id;
872 (dr + n)->dev_opt = flags;
873
874 if (++n >= dev_num)
875 break;
876 }
877 read_unlock(&hci_dev_list_lock);
878
879 dl->dev_num = n;
880 size = sizeof(*dl) + n * sizeof(*dr);
881
882 err = copy_to_user(arg, dl, size);
883 kfree(dl);
884
885 return err ? -EFAULT : 0;
886 }
887
888 int hci_get_dev_info(void __user *arg)
889 {
890 struct hci_dev *hdev;
891 struct hci_dev_info di;
892 unsigned long flags;
893 int err = 0;
894
895 if (copy_from_user(&di, arg, sizeof(di)))
896 return -EFAULT;
897
898 hdev = hci_dev_get(di.dev_id);
899 if (!hdev)
900 return -ENODEV;
901
902 /* When the auto-off is configured it means the transport
903 * is running, but in that case still indicate that the
904 * device is actually down.
905 */
906 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
907 flags = hdev->flags & ~BIT(HCI_UP);
908 else
909 flags = hdev->flags;
910
911 strcpy(di.name, hdev->name);
912 di.bdaddr = hdev->bdaddr;
913 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
914 di.flags = flags;
915 di.pkt_type = hdev->pkt_type;
916 if (lmp_bredr_capable(hdev)) {
917 di.acl_mtu = hdev->acl_mtu;
918 di.acl_pkts = hdev->acl_pkts;
919 di.sco_mtu = hdev->sco_mtu;
920 di.sco_pkts = hdev->sco_pkts;
921 } else {
922 di.acl_mtu = hdev->le_mtu;
923 di.acl_pkts = hdev->le_pkts;
924 di.sco_mtu = 0;
925 di.sco_pkts = 0;
926 }
927 di.link_policy = hdev->link_policy;
928 di.link_mode = hdev->link_mode;
929
930 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
931 memcpy(&di.features, &hdev->features, sizeof(di.features));
932
933 if (copy_to_user(arg, &di, sizeof(di)))
934 err = -EFAULT;
935
936 hci_dev_put(hdev);
937
938 return err;
939 }
940
941 /* ---- Interface to HCI drivers ---- */
942
943 static int hci_rfkill_set_block(void *data, bool blocked)
944 {
945 struct hci_dev *hdev = data;
946
947 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
948
949 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
950 return -EBUSY;
951
952 if (blocked) {
953 hci_dev_set_flag(hdev, HCI_RFKILLED);
954 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
955 !hci_dev_test_flag(hdev, HCI_CONFIG))
956 hci_dev_do_close(hdev);
957 } else {
958 hci_dev_clear_flag(hdev, HCI_RFKILLED);
959 }
960
961 return 0;
962 }
963
964 static const struct rfkill_ops hci_rfkill_ops = {
965 .set_block = hci_rfkill_set_block,
966 };
967
968 static void hci_power_on(struct work_struct *work)
969 {
970 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
971 int err;
972
973 BT_DBG("%s", hdev->name);
974
975 if (test_bit(HCI_UP, &hdev->flags) &&
976 hci_dev_test_flag(hdev, HCI_MGMT) &&
977 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
978 cancel_delayed_work(&hdev->power_off);
979 err = hci_powered_update_sync(hdev);
980 mgmt_power_on(hdev, err);
981 return;
982 }
983
984 err = hci_dev_do_open(hdev);
985 if (err < 0) {
986 hci_dev_lock(hdev);
987 mgmt_set_powered_failed(hdev, err);
988 hci_dev_unlock(hdev);
989 return;
990 }
991
992 /* During the HCI setup phase, a few error conditions are
993 * ignored and they need to be checked now. If they are still
994 * valid, it is important to turn the device back off.
995 */
996 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
997 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
998 (hdev->dev_type == HCI_PRIMARY &&
999 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1000 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
1001 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
1002 hci_dev_do_close(hdev);
1003 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
1004 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1005 HCI_AUTO_OFF_TIMEOUT);
1006 }
1007
1008 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
1009 /* For unconfigured devices, set the HCI_RAW flag
1010 * so that userspace can easily identify them.
1011 */
1012 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1013 set_bit(HCI_RAW, &hdev->flags);
1014
1015 /* For fully configured devices, this will send
1016 * the Index Added event. For unconfigured devices,
1017 * it will send the Unconfigured Index Added event.
1018 *
1019 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
1020 * and no event will be sent.
1021 */
1022 mgmt_index_added(hdev);
1023 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
1024 /* When the controller is now configured, then it
1025 * is important to clear the HCI_RAW flag.
1026 */
1027 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1028 clear_bit(HCI_RAW, &hdev->flags);
1029
1030 /* Powering on the controller with HCI_CONFIG set only
1031 * happens with the transition from unconfigured to
1032 * configured. This will send the Index Added event.
1033 */
1034 mgmt_index_added(hdev);
1035 }
1036 }
1037
1038 static void hci_power_off(struct work_struct *work)
1039 {
1040 struct hci_dev *hdev = container_of(work, struct hci_dev,
1041 power_off.work);
1042
1043 BT_DBG("%s", hdev->name);
1044
1045 hci_dev_do_close(hdev);
1046 }
1047
1048 static void hci_error_reset(struct work_struct *work)
1049 {
1050 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1051
1052 BT_DBG("%s", hdev->name);
1053
1054 if (hdev->hw_error)
1055 hdev->hw_error(hdev, hdev->hw_error_code);
1056 else
1057 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1058
1059 if (hci_dev_do_close(hdev))
1060 return;
1061
1062 hci_dev_do_open(hdev);
1063 }
1064
1065 void hci_uuids_clear(struct hci_dev *hdev)
1066 {
1067 struct bt_uuid *uuid, *tmp;
1068
1069 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1070 list_del(&uuid->list);
1071 kfree(uuid);
1072 }
1073 }
1074
1075 void hci_link_keys_clear(struct hci_dev *hdev)
1076 {
1077 struct link_key *key;
1078
1079 list_for_each_entry(key, &hdev->link_keys, list) {
1080 list_del_rcu(&key->list);
1081 kfree_rcu(key, rcu);
1082 }
1083 }
1084
1085 void hci_smp_ltks_clear(struct hci_dev *hdev)
1086 {
1087 struct smp_ltk *k;
1088
1089 list_for_each_entry(k, &hdev->long_term_keys, list) {
1090 list_del_rcu(&k->list);
1091 kfree_rcu(k, rcu);
1092 }
1093 }
1094
1095 void hci_smp_irks_clear(struct hci_dev *hdev)
1096 {
1097 struct smp_irk *k;
1098
1099 list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
1100 list_del_rcu(&k->list);
1101 kfree_rcu(k, rcu);
1102 }
1103 }
1104
1105 void hci_blocked_keys_clear(struct hci_dev *hdev)
1106 {
1107 struct blocked_key *b;
1108
1109 list_for_each_entry(b, &hdev->blocked_keys, list) {
1110 list_del_rcu(&b->list);
1111 kfree_rcu(b, rcu);
1112 }
1113 }
1114
1115 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1116 {
1117 bool blocked = false;
1118 struct blocked_key *b;
1119
1120 rcu_read_lock();
1121 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1122 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1123 blocked = true;
1124 break;
1125 }
1126 }
1127
1128 rcu_read_unlock();
1129 return blocked;
1130 }
1131
1132 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1133 {
1134 struct link_key *k;
1135
1136 rcu_read_lock();
1137 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1138 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1139 rcu_read_unlock();
1140
1141 if (hci_is_blocked_key(hdev,
1142 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1143 k->val)) {
1144 bt_dev_warn_ratelimited(hdev,
1145 "Link key blocked for %pMR",
1146 &k->bdaddr);
1147 return NULL;
1148 }
1149
1150 return k;
1151 }
1152 }
1153 rcu_read_unlock();
1154
1155 return NULL;
1156 }
1157
1158 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1159 u8 key_type, u8 old_key_type)
1160 {
1161 /* Legacy key */
1162 if (key_type < 0x03)
1163 return true;
1164
1165 /* Debug keys are insecure so don't store them persistently */
1166 if (key_type == HCI_LK_DEBUG_COMBINATION)
1167 return false;
1168
1169 /* Changed combination key and there's no previous one */
1170 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1171 return false;
1172
1173 /* Security mode 3 case */
1174 if (!conn)
1175 return true;
1176
1177 /* BR/EDR key derived using SC from an LE link */
1178 if (conn->type == LE_LINK)
1179 return true;
1180
1181 /* Neither local nor remote side had no-bonding as requirement */
1182 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1183 return true;
1184
1185 /* Local side had dedicated bonding as requirement */
1186 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1187 return true;
1188
1189 /* Remote side had dedicated bonding as requirement */
1190 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1191 return true;
1192
1193 /* If none of the above criteria match, then don't store the key
1194 * persistently */
1195 return false;
1196 }
1197
1198 static u8 ltk_role(u8 type)
1199 {
1200 if (type == SMP_LTK)
1201 return HCI_ROLE_MASTER;
1202
1203 return HCI_ROLE_SLAVE;
1204 }
1205
1206 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1207 u8 addr_type, u8 role)
1208 {
1209 struct smp_ltk *k;
1210
1211 rcu_read_lock();
1212 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1213 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1214 continue;
1215
1216 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1217 rcu_read_unlock();
1218
1219 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1220 k->val)) {
1221 bt_dev_warn_ratelimited(hdev,
1222 "LTK blocked for %pMR",
1223 &k->bdaddr);
1224 return NULL;
1225 }
1226
1227 return k;
1228 }
1229 }
1230 rcu_read_unlock();
1231
1232 return NULL;
1233 }
1234
1235 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1236 {
1237 struct smp_irk *irk_to_return = NULL;
1238 struct smp_irk *irk;
1239
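/* First look for an IRK whose cached RPA matches; only if that fails,
 * cryptographically resolve the RPA against every stored IRK and cache
 * the address on a match.
 */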
1240 rcu_read_lock();
1241 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1242 if (!bacmp(&irk->rpa, rpa)) {
1243 irk_to_return = irk;
1244 goto done;
1245 }
1246 }
1247
1248 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1249 if (smp_irk_matches(hdev, irk->val, rpa)) {
1250 bacpy(&irk->rpa, rpa);
1251 irk_to_return = irk;
1252 goto done;
1253 }
1254 }
1255
1256 done:
1257 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1258 irk_to_return->val)) {
1259 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1260 &irk_to_return->bdaddr);
1261 irk_to_return = NULL;
1262 }
1263
1264 rcu_read_unlock();
1265
1266 return irk_to_return;
1267 }
1268
1269 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1270 u8 addr_type)
1271 {
1272 struct smp_irk *irk_to_return = NULL;
1273 struct smp_irk *irk;
1274
1275 /* Identity Address must be public or static random */
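/* (a static random address has the two most significant bits of its
 * most significant byte set, hence the 0xc0 check on b[5])
 */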
1276 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1277 return NULL;
1278
1279 rcu_read_lock();
1280 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1281 if (addr_type == irk->addr_type &&
1282 bacmp(bdaddr, &irk->bdaddr) == 0) {
1283 irk_to_return = irk;
1284 goto done;
1285 }
1286 }
1287
1288 done:
1289
1290 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1291 irk_to_return->val)) {
1292 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1293 &irk_to_return->bdaddr);
1294 irk_to_return = NULL;
1295 }
1296
1297 rcu_read_unlock();
1298
1299 return irk_to_return;
1300 }
1301
1302 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1303 bdaddr_t *bdaddr, u8 *val, u8 type,
1304 u8 pin_len, bool *persistent)
1305 {
1306 struct link_key *key, *old_key;
1307 u8 old_key_type;
1308
1309 old_key = hci_find_link_key(hdev, bdaddr);
1310 if (old_key) {
1311 old_key_type = old_key->type;
1312 key = old_key;
1313 } else {
1314 old_key_type = conn ? conn->key_type : 0xff;
1315 key = kzalloc(sizeof(*key), GFP_KERNEL);
1316 if (!key)
1317 return NULL;
1318 list_add_rcu(&key->list, &hdev->link_keys);
1319 }
1320
1321 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1322
1323 /* Some buggy controller combinations generate a changed
1324 * combination key for legacy pairing even when there's no
1325 * previous key */
1326 if (type == HCI_LK_CHANGED_COMBINATION &&
1327 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1328 type = HCI_LK_COMBINATION;
1329 if (conn)
1330 conn->key_type = type;
1331 }
1332
1333 bacpy(&key->bdaddr, bdaddr);
1334 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1335 key->pin_len = pin_len;
1336
1337 if (type == HCI_LK_CHANGED_COMBINATION)
1338 key->type = old_key_type;
1339 else
1340 key->type = type;
1341
1342 if (persistent)
1343 *persistent = hci_persistent_key(hdev, conn, type,
1344 old_key_type);
1345
1346 return key;
1347 }
1348
1349 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1350 u8 addr_type, u8 type, u8 authenticated,
1351 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1352 {
1353 struct smp_ltk *key, *old_key;
1354 u8 role = ltk_role(type);
1355
1356 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1357 if (old_key)
1358 key = old_key;
1359 else {
1360 key = kzalloc(sizeof(*key), GFP_KERNEL);
1361 if (!key)
1362 return NULL;
1363 list_add_rcu(&key->list, &hdev->long_term_keys);
1364 }
1365
1366 bacpy(&key->bdaddr, bdaddr);
1367 key->bdaddr_type = addr_type;
1368 memcpy(key->val, tk, sizeof(key->val));
1369 key->authenticated = authenticated;
1370 key->ediv = ediv;
1371 key->rand = rand;
1372 key->enc_size = enc_size;
1373 key->type = type;
1374
1375 return key;
1376 }
1377
1378 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1379 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1380 {
1381 struct smp_irk *irk;
1382
1383 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1384 if (!irk) {
1385 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1386 if (!irk)
1387 return NULL;
1388
1389 bacpy(&irk->bdaddr, bdaddr);
1390 irk->addr_type = addr_type;
1391
1392 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1393 }
1394
1395 memcpy(irk->val, val, 16);
1396 bacpy(&irk->rpa, rpa);
1397
1398 return irk;
1399 }
1400
1401 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1402 {
1403 struct link_key *key;
1404
1405 key = hci_find_link_key(hdev, bdaddr);
1406 if (!key)
1407 return -ENOENT;
1408
1409 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1410
1411 list_del_rcu(&key->list);
1412 kfree_rcu(key, rcu);
1413
1414 return 0;
1415 }
1416
1417 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1418 {
1419 struct smp_ltk *k;
1420 int removed = 0;
1421
1422 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1423 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1424 continue;
1425
1426 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1427
1428 list_del_rcu(&k->list);
1429 kfree_rcu(k, rcu);
1430 removed++;
1431 }
1432
1433 return removed ? 0 : -ENOENT;
1434 }
1435
1436 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1437 {
1438 struct smp_irk *k;
1439
1440 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
1441 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1442 continue;
1443
1444 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1445
1446 list_del_rcu(&k->list);
1447 kfree_rcu(k, rcu);
1448 }
1449 }
1450
1451 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1452 {
1453 struct smp_ltk *k;
1454 struct smp_irk *irk;
1455 u8 addr_type;
1456
1457 if (type == BDADDR_BREDR) {
1458 if (hci_find_link_key(hdev, bdaddr))
1459 return true;
1460 return false;
1461 }
1462
1463 /* Convert to HCI addr type which struct smp_ltk uses */
1464 if (type == BDADDR_LE_PUBLIC)
1465 addr_type = ADDR_LE_DEV_PUBLIC;
1466 else
1467 addr_type = ADDR_LE_DEV_RANDOM;
1468
1469 irk = hci_get_irk(hdev, bdaddr, addr_type);
1470 if (irk) {
1471 bdaddr = &irk->bdaddr;
1472 addr_type = irk->addr_type;
1473 }
1474
1475 rcu_read_lock();
1476 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1477 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1478 rcu_read_unlock();
1479 return true;
1480 }
1481 }
1482 rcu_read_unlock();
1483
1484 return false;
1485 }
1486
1487 /* HCI command timer function */
1488 static void hci_cmd_timeout(struct work_struct *work)
1489 {
1490 struct hci_dev *hdev = container_of(work, struct hci_dev,
1491 cmd_timer.work);
1492
1493 if (hdev->sent_cmd) {
1494 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1495 u16 opcode = __le16_to_cpu(sent->opcode);
1496
1497 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1498 } else {
1499 bt_dev_err(hdev, "command tx timeout");
1500 }
1501
1502 if (hdev->cmd_timeout)
1503 hdev->cmd_timeout(hdev);
1504
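/* Re-open the command flow control window and requeue the command
 * work so the queue does not stall behind the timed-out command.
 */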
1505 atomic_set(&hdev->cmd_cnt, 1);
1506 queue_work(hdev->workqueue, &hdev->cmd_work);
1507 }
1508
1509 /* HCI ncmd timer function */
1510 static void hci_ncmd_timeout(struct work_struct *work)
1511 {
1512 struct hci_dev *hdev = container_of(work, struct hci_dev,
1513 ncmd_timer.work);
1514
1515 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1516
1517 /* During HCI_INIT phase no events can be injected if the ncmd timer
1518 * triggers since the procedure has its own timeout handling.
1519 */
1520 if (test_bit(HCI_INIT, &hdev->flags))
1521 return;
1522
1523 /* This is an irrecoverable state, inject hardware error event */
1524 hci_reset_dev(hdev);
1525 }
1526
1527 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1528 bdaddr_t *bdaddr, u8 bdaddr_type)
1529 {
1530 struct oob_data *data;
1531
1532 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1533 if (bacmp(bdaddr, &data->bdaddr) != 0)
1534 continue;
1535 if (data->bdaddr_type != bdaddr_type)
1536 continue;
1537 return data;
1538 }
1539
1540 return NULL;
1541 }
1542
1543 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1544 u8 bdaddr_type)
1545 {
1546 struct oob_data *data;
1547
1548 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1549 if (!data)
1550 return -ENOENT;
1551
1552 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1553
1554 list_del(&data->list);
1555 kfree(data);
1556
1557 return 0;
1558 }
1559
1560 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1561 {
1562 struct oob_data *data, *n;
1563
1564 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1565 list_del(&data->list);
1566 kfree(data);
1567 }
1568 }
1569
1570 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1571 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1572 u8 *hash256, u8 *rand256)
1573 {
1574 struct oob_data *data;
1575
1576 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1577 if (!data) {
1578 data = kmalloc(sizeof(*data), GFP_KERNEL);
1579 if (!data)
1580 return -ENOMEM;
1581
1582 bacpy(&data->bdaddr, bdaddr);
1583 data->bdaddr_type = bdaddr_type;
1584 list_add(&data->list, &hdev->remote_oob_data);
1585 }
1586
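/* data->present is a bitmask of the OOB values that are valid:
 * 0x01 = P-192 hash/rand only, 0x02 = P-256 only, 0x03 = both.
 */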
1587 if (hash192 && rand192) {
1588 memcpy(data->hash192, hash192, sizeof(data->hash192));
1589 memcpy(data->rand192, rand192, sizeof(data->rand192));
1590 if (hash256 && rand256)
1591 data->present = 0x03;
1592 } else {
1593 memset(data->hash192, 0, sizeof(data->hash192));
1594 memset(data->rand192, 0, sizeof(data->rand192));
1595 if (hash256 && rand256)
1596 data->present = 0x02;
1597 else
1598 data->present = 0x00;
1599 }
1600
1601 if (hash256 && rand256) {
1602 memcpy(data->hash256, hash256, sizeof(data->hash256));
1603 memcpy(data->rand256, rand256, sizeof(data->rand256));
1604 } else {
1605 memset(data->hash256, 0, sizeof(data->hash256));
1606 memset(data->rand256, 0, sizeof(data->rand256));
1607 if (hash192 && rand192)
1608 data->present = 0x01;
1609 }
1610
1611 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1612
1613 return 0;
1614 }
1615
1616 /* This function requires the caller holds hdev->lock */
1617 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1618 {
1619 struct adv_info *adv_instance;
1620
1621 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1622 if (adv_instance->instance == instance)
1623 return adv_instance;
1624 }
1625
1626 return NULL;
1627 }
1628
1629 /* This function requires the caller holds hdev->lock */
1630 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1631 {
1632 struct adv_info *cur_instance;
1633
1634 cur_instance = hci_find_adv_instance(hdev, instance);
1635 if (!cur_instance)
1636 return NULL;
1637
1638 if (cur_instance == list_last_entry(&hdev->adv_instances,
1639 struct adv_info, list))
1640 return list_first_entry(&hdev->adv_instances,
1641 struct adv_info, list);
1642 else
1643 return list_next_entry(cur_instance, list);
1644 }
1645
1646 /* This function requires the caller holds hdev->lock */
1647 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1648 {
1649 struct adv_info *adv_instance;
1650
1651 adv_instance = hci_find_adv_instance(hdev, instance);
1652 if (!adv_instance)
1653 return -ENOENT;
1654
1655 BT_DBG("%s removing %dMR", hdev->name, instance);
1656
1657 if (hdev->cur_adv_instance == instance) {
1658 if (hdev->adv_instance_timeout) {
1659 cancel_delayed_work(&hdev->adv_instance_expire);
1660 hdev->adv_instance_timeout = 0;
1661 }
1662 hdev->cur_adv_instance = 0x00;
1663 }
1664
1665 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1666
1667 list_del(&adv_instance->list);
1668 kfree(adv_instance);
1669
1670 hdev->adv_instance_cnt--;
1671
1672 return 0;
1673 }
1674
1675 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1676 {
1677 struct adv_info *adv_instance, *n;
1678
1679 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1680 adv_instance->rpa_expired = rpa_expired;
1681 }
1682
1683 /* This function requires the caller holds hdev->lock */
1684 void hci_adv_instances_clear(struct hci_dev *hdev)
1685 {
1686 struct adv_info *adv_instance, *n;
1687
1688 if (hdev->adv_instance_timeout) {
1689 cancel_delayed_work(&hdev->adv_instance_expire);
1690 hdev->adv_instance_timeout = 0;
1691 }
1692
1693 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1694 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1695 list_del(&adv_instance->list);
1696 kfree(adv_instance);
1697 }
1698
1699 hdev->adv_instance_cnt = 0;
1700 hdev->cur_adv_instance = 0x00;
1701 }
1702
1703 static void adv_instance_rpa_expired(struct work_struct *work)
1704 {
1705 struct adv_info *adv_instance = container_of(work, struct adv_info,
1706 rpa_expired_cb.work);
1707
1708 BT_DBG("");
1709
1710 adv_instance->rpa_expired = true;
1711 }
1712
1713 /* This function requires the caller holds hdev->lock */
1714 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1715 u32 flags, u16 adv_data_len, u8 *adv_data,
1716 u16 scan_rsp_len, u8 *scan_rsp_data,
1717 u16 timeout, u16 duration, s8 tx_power,
1718 u32 min_interval, u32 max_interval,
1719 u8 mesh_handle)
1720 {
1721 struct adv_info *adv;
1722
1723 adv = hci_find_adv_instance(hdev, instance);
1724 if (adv) {
1725 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1726 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1727 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1728 } else {
1729 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1730 instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1731 return ERR_PTR(-EOVERFLOW);
1732
1733 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1734 if (!adv)
1735 return ERR_PTR(-ENOMEM);
1736
1737 adv->pending = true;
1738 adv->instance = instance;
1739 list_add(&adv->list, &hdev->adv_instances);
1740 hdev->adv_instance_cnt++;
1741 }
1742
1743 adv->flags = flags;
1744 adv->min_interval = min_interval;
1745 adv->max_interval = max_interval;
1746 adv->tx_power = tx_power;
1747 /* Defining a mesh_handle changes the timing units to ms,
1748 * rather than seconds, and ties the instance to the requested
1749 * mesh_tx queue.
1750 */
1751 adv->mesh = mesh_handle;
1752
1753 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1754 scan_rsp_len, scan_rsp_data);
1755
1756 adv->timeout = timeout;
1757 adv->remaining_time = timeout;
1758
1759 if (duration == 0)
1760 adv->duration = hdev->def_multi_adv_rotation_duration;
1761 else
1762 adv->duration = duration;
1763
1764 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1765
1766 BT_DBG("%s for %dMR", hdev->name, instance);
1767
1768 return adv;
1769 }
1770
1771 /* This function requires the caller holds hdev->lock */
1772 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1773 u32 flags, u8 data_len, u8 *data,
1774 u32 min_interval, u32 max_interval)
1775 {
1776 struct adv_info *adv;
1777
1778 adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1779 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1780 min_interval, max_interval, 0);
1781 if (IS_ERR(adv))
1782 return adv;
1783
1784 adv->periodic = true;
1785 adv->per_adv_data_len = data_len;
1786
1787 if (data)
1788 memcpy(adv->per_adv_data, data, data_len);
1789
1790 return adv;
1791 }
1792
1793 /* This function requires the caller holds hdev->lock */
1794 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1795 u16 adv_data_len, u8 *adv_data,
1796 u16 scan_rsp_len, u8 *scan_rsp_data)
1797 {
1798 struct adv_info *adv;
1799
1800 adv = hci_find_adv_instance(hdev, instance);
1801
1802 /* If advertisement doesn't exist, we can't modify its data */
1803 if (!adv)
1804 return -ENOENT;
1805
1806 if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1807 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1808 memcpy(adv->adv_data, adv_data, adv_data_len);
1809 adv->adv_data_len = adv_data_len;
1810 adv->adv_data_changed = true;
1811 }
1812
1813 if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1814 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1815 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1816 adv->scan_rsp_len = scan_rsp_len;
1817 adv->scan_rsp_changed = true;
1818 }
1819
1820 /* Mark as changed if there are flags which would affect it */
1821 if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1822 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1823 adv->scan_rsp_changed = true;
1824
1825 return 0;
1826 }
1827
1828 /* This function requires the caller holds hdev->lock */
1829 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1830 {
1831 u32 flags;
1832 struct adv_info *adv;
1833
1834 if (instance == 0x00) {
1835 /* Instance 0 always manages the "Tx Power" and "Flags"
1836 * fields
1837 */
1838 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1839
1840 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1841 * corresponds to the "connectable" instance flag.
1842 */
1843 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1844 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1845
1846 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1847 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1848 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1849 flags |= MGMT_ADV_FLAG_DISCOV;
1850
1851 return flags;
1852 }
1853
1854 adv = hci_find_adv_instance(hdev, instance);
1855
1856 /* Return 0 when we got an invalid instance identifier. */
1857 if (!adv)
1858 return 0;
1859
1860 return adv->flags;
1861 }
1862
1863 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1864 {
1865 struct adv_info *adv;
1866
1867 /* Instance 0x00 always sets the local name */
1868 if (instance == 0x00)
1869 return true;
1870
1871 adv = hci_find_adv_instance(hdev, instance);
1872 if (!adv)
1873 return false;
1874
1875 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1876 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1877 return true;
1878
1879 return adv->scan_rsp_len ? true : false;
1880 }
1881
1882 /* This function requires the caller holds hdev->lock */
1883 void hci_adv_monitors_clear(struct hci_dev *hdev)
1884 {
1885 struct adv_monitor *monitor;
1886 int handle;
1887
1888 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1889 hci_free_adv_monitor(hdev, monitor);
1890
1891 idr_destroy(&hdev->adv_monitors_idr);
1892 }
1893
1894 /* Frees the monitor structure and does some bookkeeping.
1895 * This function requires the caller holds hdev->lock.
1896 */
1897 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1898 {
1899 struct adv_pattern *pattern;
1900 struct adv_pattern *tmp;
1901
1902 if (!monitor)
1903 return;
1904
1905 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1906 list_del(&pattern->list);
1907 kfree(pattern);
1908 }
1909
1910 if (monitor->handle)
1911 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1912
1913 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1914 hdev->adv_monitors_cnt--;
1915 mgmt_adv_monitor_removed(hdev, monitor->handle);
1916 }
1917
1918 kfree(monitor);
1919 }
1920
1921 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1922 * also attempts to forward the request to the controller.
1923 * This function requires the caller holds hci_req_sync_lock.
1924 */
1925 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1926 {
1927 int min, max, handle;
1928 int status = 0;
1929
1930 if (!monitor)
1931 return -EINVAL;
1932
1933 hci_dev_lock(hdev);
1934
1935 min = HCI_MIN_ADV_MONITOR_HANDLE;
1936 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1937 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1938 GFP_KERNEL);
1939
1940 hci_dev_unlock(hdev);
1941
1942 if (handle < 0)
1943 return handle;
1944
1945 monitor->handle = handle;
1946
1947 if (!hdev_is_powered(hdev))
1948 return status;
1949
1950 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1951 case HCI_ADV_MONITOR_EXT_NONE:
1952 bt_dev_dbg(hdev, "%s add monitor %d status %d", hdev->name,
1953 monitor->handle, status);
1954 /* Message was not forwarded to controller - not an error */
1955 break;
1956
1957 case HCI_ADV_MONITOR_EXT_MSFT:
1958 status = msft_add_monitor_pattern(hdev, monitor);
1959 bt_dev_dbg(hdev, "%s add monitor %d msft status %d", hdev->name,
1960 monitor->handle, status);
1961 break;
1962 }
1963
1964 return status;
1965 }
1966
1967 /* Attempts to tell the controller and free the monitor. If somehow the
1968 * controller doesn't have a corresponding handle, remove anyway.
1969 * This function requires the caller holds hci_req_sync_lock.
1970 */
1971 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1972 struct adv_monitor *monitor)
1973 {
1974 int status = 0;
1975
1976 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1977 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1978 bt_dev_dbg(hdev, "%s remove monitor %d status %d", hdev->name,
1979 monitor->handle, status);
1980 goto free_monitor;
1981
1982 case HCI_ADV_MONITOR_EXT_MSFT:
1983 status = msft_remove_monitor(hdev, monitor);
1984 bt_dev_dbg(hdev, "%s remove monitor %d msft status %d",
1985 hdev->name, monitor->handle, status);
1986 break;
1987 }
1988
1989 /* In case no matching handle registered, just free the monitor */
1990 if (status == -ENOENT)
1991 goto free_monitor;
1992
1993 return status;
1994
1995 free_monitor:
1996 if (status == -ENOENT)
1997 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1998 monitor->handle);
1999 hci_free_adv_monitor(hdev, monitor);
2000
2001 return status;
2002 }
2003
2004 /* This function requires the caller holds hci_req_sync_lock */
2005 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2006 {
2007 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2008
2009 if (!monitor)
2010 return -EINVAL;
2011
2012 return hci_remove_adv_monitor(hdev, monitor);
2013 }
2014
2015 /* This function requires the caller holds hci_req_sync_lock */
2016 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2017 {
2018 struct adv_monitor *monitor;
2019 int idr_next_id = 0;
2020 int status = 0;
2021
2022 while (1) {
2023 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2024 if (!monitor)
2025 break;
2026
2027 status = hci_remove_adv_monitor(hdev, monitor);
2028 if (status)
2029 return status;
2030
2031 idr_next_id++;
2032 }
2033
2034 return status;
2035 }
2036
2037 /* This function requires the caller holds hdev->lock */
2038 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2039 {
2040 return !idr_is_empty(&hdev->adv_monitors_idr);
2041 }
2042
2043 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2044 {
2045 if (msft_monitor_supported(hdev))
2046 return HCI_ADV_MONITOR_EXT_MSFT;
2047
2048 return HCI_ADV_MONITOR_EXT_NONE;
2049 }
2050
2051 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2052 bdaddr_t *bdaddr, u8 type)
2053 {
2054 struct bdaddr_list *b;
2055
2056 list_for_each_entry(b, bdaddr_list, list) {
2057 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2058 return b;
2059 }
2060
2061 return NULL;
2062 }
2063
2064 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2065 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2066 u8 type)
2067 {
2068 struct bdaddr_list_with_irk *b;
2069
2070 list_for_each_entry(b, bdaddr_list, list) {
2071 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2072 return b;
2073 }
2074
2075 return NULL;
2076 }
2077
2078 struct bdaddr_list_with_flags *
2079 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2080 bdaddr_t *bdaddr, u8 type)
2081 {
2082 struct bdaddr_list_with_flags *b;
2083
2084 list_for_each_entry(b, bdaddr_list, list) {
2085 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2086 return b;
2087 }
2088
2089 return NULL;
2090 }
2091
2092 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2093 {
2094 struct bdaddr_list *b, *n;
2095
2096 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2097 list_del(&b->list);
2098 kfree(b);
2099 }
2100 }
2101
2102 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2103 {
2104 struct bdaddr_list *entry;
2105
2106 if (!bacmp(bdaddr, BDADDR_ANY))
2107 return -EBADF;
2108
2109 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2110 return -EEXIST;
2111
2112 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2113 if (!entry)
2114 return -ENOMEM;
2115
2116 bacpy(&entry->bdaddr, bdaddr);
2117 entry->bdaddr_type = type;
2118
2119 list_add(&entry->list, list);
2120
2121 return 0;
2122 }
2123
2124 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2125 u8 type, u8 *peer_irk, u8 *local_irk)
2126 {
2127 struct bdaddr_list_with_irk *entry;
2128
2129 if (!bacmp(bdaddr, BDADDR_ANY))
2130 return -EBADF;
2131
2132 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2133 return -EEXIST;
2134
2135 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2136 if (!entry)
2137 return -ENOMEM;
2138
2139 bacpy(&entry->bdaddr, bdaddr);
2140 entry->bdaddr_type = type;
2141
2142 if (peer_irk)
2143 memcpy(entry->peer_irk, peer_irk, 16);
2144
2145 if (local_irk)
2146 memcpy(entry->local_irk, local_irk, 16);
2147
2148 list_add(&entry->list, list);
2149
2150 return 0;
2151 }
2152
2153 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2154 u8 type, u32 flags)
2155 {
2156 struct bdaddr_list_with_flags *entry;
2157
2158 if (!bacmp(bdaddr, BDADDR_ANY))
2159 return -EBADF;
2160
2161 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2162 return -EEXIST;
2163
2164 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2165 if (!entry)
2166 return -ENOMEM;
2167
2168 bacpy(&entry->bdaddr, bdaddr);
2169 entry->bdaddr_type = type;
2170 entry->flags = flags;
2171
2172 list_add(&entry->list, list);
2173
2174 return 0;
2175 }
2176
2177 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2178 {
2179 struct bdaddr_list *entry;
2180
2181 if (!bacmp(bdaddr, BDADDR_ANY)) {
2182 hci_bdaddr_list_clear(list);
2183 return 0;
2184 }
2185
2186 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2187 if (!entry)
2188 return -ENOENT;
2189
2190 list_del(&entry->list);
2191 kfree(entry);
2192
2193 return 0;
2194 }
2195
2196 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2197 u8 type)
2198 {
2199 struct bdaddr_list_with_irk *entry;
2200
2201 if (!bacmp(bdaddr, BDADDR_ANY)) {
2202 hci_bdaddr_list_clear(list);
2203 return 0;
2204 }
2205
2206 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2207 if (!entry)
2208 return -ENOENT;
2209
2210 list_del(&entry->list);
2211 kfree(entry);
2212
2213 return 0;
2214 }
2215
2216 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2217 u8 type)
2218 {
2219 struct bdaddr_list_with_flags *entry;
2220
2221 if (!bacmp(bdaddr, BDADDR_ANY)) {
2222 hci_bdaddr_list_clear(list);
2223 return 0;
2224 }
2225
2226 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2227 if (!entry)
2228 return -ENOENT;
2229
2230 list_del(&entry->list);
2231 kfree(entry);
2232
2233 return 0;
2234 }
2235
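/* Usage sketch (illustrative): these helpers do no locking themselves, so
 * callers are expected to hold the lock protecting the given list, e.g.
 * hdev->lock for hdev->reject_list:
 *
 *	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
 *	if (!err && hci_bdaddr_list_lookup(&hdev->reject_list, &bdaddr,
 *					   BDADDR_BREDR))
 *		err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr,
 *					  BDADDR_BREDR);
 *
 * Passing BDADDR_ANY to the del helpers clears the whole list.
 */
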
2236 /* This function requires the caller holds hdev->lock */
2237 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2238 bdaddr_t *addr, u8 addr_type)
2239 {
2240 struct hci_conn_params *params;
2241
2242 list_for_each_entry(params, &hdev->le_conn_params, list) {
2243 		if (bacmp(&params->addr, addr) == 0 &&
2244 params->addr_type == addr_type) {
2245 return params;
2246 }
2247 }
2248
2249 return NULL;
2250 }
2251
2252 /* This function requires the caller holds hdev->lock */
2253 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2254 bdaddr_t *addr, u8 addr_type)
2255 {
2256 struct hci_conn_params *param;
2257
2258 list_for_each_entry(param, list, action) {
2259 		if (bacmp(&param->addr, addr) == 0 &&
2260 param->addr_type == addr_type)
2261 return param;
2262 }
2263
2264 return NULL;
2265 }
2266
2267 /* This function requires the caller holds hdev->lock */
2268 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2269 bdaddr_t *addr, u8 addr_type)
2270 {
2271 struct hci_conn_params *params;
2272
2273 params = hci_conn_params_lookup(hdev, addr, addr_type);
2274 if (params)
2275 return params;
2276
2277 params = kzalloc(sizeof(*params), GFP_KERNEL);
2278 if (!params) {
2279 bt_dev_err(hdev, "out of memory");
2280 return NULL;
2281 }
2282
2283 	bacpy(&params->addr, addr);
2284 	params->addr_type = addr_type;
2285 
2286 	list_add(&params->list, &hdev->le_conn_params);
2287 	INIT_LIST_HEAD(&params->action);
2288
2289 params->conn_min_interval = hdev->le_conn_min_interval;
2290 params->conn_max_interval = hdev->le_conn_max_interval;
2291 params->conn_latency = hdev->le_conn_latency;
2292 params->supervision_timeout = hdev->le_supv_timeout;
2293 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2294
2295 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2296
2297 return params;
2298 }
2299
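/* Usage sketch (illustrative): with hdev->lock held, callers look up or
 * create parameters for a peer and then adjust the connection policy:
 *
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (!params)
 *		return -ENOMEM;
 *
 *	params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *	hci_update_passive_scan(hdev);
 */
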
2300 static void hci_conn_params_free(struct hci_conn_params *params)
2301 {
2302 if (params->conn) {
2303 hci_conn_drop(params->conn);
2304 hci_conn_put(params->conn);
2305 }
2306
2307 	list_del(&params->action);
2308 	list_del(&params->list);
2309 kfree(params);
2310 }
2311
2312 /* This function requires the caller holds hdev->lock */
2313 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2314 {
2315 struct hci_conn_params *params;
2316
2317 params = hci_conn_params_lookup(hdev, addr, addr_type);
2318 if (!params)
2319 return;
2320
2321 hci_conn_params_free(params);
2322
2323 hci_update_passive_scan(hdev);
2324
2325 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2326 }
2327
2328 /* This function requires the caller holds hdev->lock */
2329 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2330 {
2331 struct hci_conn_params *params, *tmp;
2332
2333 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2334 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2335 continue;
2336
2337 		/* If a one-time (explicit) connection to the disabled device is
2338 		 * being established, keep the params but mark them as explicit only.
2339 */
2340 if (params->explicit_connect) {
2341 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2342 continue;
2343 }
2344
2345 list_del(¶ms->list);
2346 kfree(params);
2347 }
2348
2349 BT_DBG("All LE disabled connection parameters were removed");
2350 }
2351
2352 /* This function requires the caller holds hdev->lock */
2353 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2354 {
2355 struct hci_conn_params *params, *tmp;
2356
2357 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2358 hci_conn_params_free(params);
2359
2360 BT_DBG("All LE connection parameters were removed");
2361 }
2362
2363 /* Copy the Identity Address of the controller.
2364 *
2365 * If the controller has a public BD_ADDR, then by default use that one.
2366  * If this is an LE-only controller without a public address, default to
2367 * the static random address.
2368 *
2369 * For debugging purposes it is possible to force controllers with a
2370 * public address to use the static random address instead.
2371 *
2372 * In case BR/EDR has been disabled on a dual-mode controller and
2373 * userspace has configured a static address, then that address
2374 * becomes the identity address instead of the public BR/EDR address.
2375 */
2376 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2377 u8 *bdaddr_type)
2378 {
2379 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2380 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2381 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2382 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2383 bacpy(bdaddr, &hdev->static_addr);
2384 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2385 } else {
2386 bacpy(bdaddr, &hdev->bdaddr);
2387 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2388 }
2389 }
2390
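/* Usage sketch (illustrative): callers typically fetch the identity address
 * when filling in management events or own-address fields:
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 *
 * After the call, id_addr_type is either ADDR_LE_DEV_PUBLIC or
 * ADDR_LE_DEV_RANDOM.
 */
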
2391 static void hci_clear_wake_reason(struct hci_dev *hdev)
2392 {
2393 hci_dev_lock(hdev);
2394
2395 hdev->wake_reason = 0;
2396 bacpy(&hdev->wake_addr, BDADDR_ANY);
2397 hdev->wake_addr_type = 0;
2398
2399 hci_dev_unlock(hdev);
2400 }
2401
2402 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2403 void *data)
2404 {
2405 struct hci_dev *hdev =
2406 container_of(nb, struct hci_dev, suspend_notifier);
2407 int ret = 0;
2408
2409 /* Userspace has full control of this device. Do nothing. */
2410 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2411 return NOTIFY_DONE;
2412
2413 if (action == PM_SUSPEND_PREPARE)
2414 ret = hci_suspend_dev(hdev);
2415 else if (action == PM_POST_SUSPEND)
2416 ret = hci_resume_dev(hdev);
2417
2418 if (ret)
2419 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2420 action, ret);
2421
2422 return NOTIFY_DONE;
2423 }
2424
2425 /* Alloc HCI device */
2426 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2427 {
2428 struct hci_dev *hdev;
2429 unsigned int alloc_size;
2430
2431 alloc_size = sizeof(*hdev);
2432 if (sizeof_priv) {
2433 /* Fixme: May need ALIGN-ment? */
2434 alloc_size += sizeof_priv;
2435 }
2436
2437 hdev = kzalloc(alloc_size, GFP_KERNEL);
2438 if (!hdev)
2439 return NULL;
2440
2441 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2442 hdev->esco_type = (ESCO_HV1);
2443 hdev->link_mode = (HCI_LM_ACCEPT);
2444 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2445 hdev->io_capability = 0x03; /* No Input No Output */
2446 hdev->manufacturer = 0xffff; /* Default to internal use */
2447 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2448 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2449 hdev->adv_instance_cnt = 0;
2450 hdev->cur_adv_instance = 0x00;
2451 hdev->adv_instance_timeout = 0;
2452
2453 hdev->advmon_allowlist_duration = 300;
2454 hdev->advmon_no_filter_duration = 500;
2455 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2456
2457 hdev->sniff_max_interval = 800;
2458 hdev->sniff_min_interval = 80;
2459
2460 hdev->le_adv_channel_map = 0x07;
2461 hdev->le_adv_min_interval = 0x0800;
2462 hdev->le_adv_max_interval = 0x0800;
2463 hdev->le_scan_interval = 0x0060;
2464 hdev->le_scan_window = 0x0030;
2465 hdev->le_scan_int_suspend = 0x0400;
2466 hdev->le_scan_window_suspend = 0x0012;
2467 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2468 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2469 hdev->le_scan_int_adv_monitor = 0x0060;
2470 hdev->le_scan_window_adv_monitor = 0x0030;
2471 hdev->le_scan_int_connect = 0x0060;
2472 hdev->le_scan_window_connect = 0x0060;
2473 hdev->le_conn_min_interval = 0x0018;
2474 hdev->le_conn_max_interval = 0x0028;
2475 hdev->le_conn_latency = 0x0000;
2476 hdev->le_supv_timeout = 0x002a;
2477 hdev->le_def_tx_len = 0x001b;
2478 hdev->le_def_tx_time = 0x0148;
2479 hdev->le_max_tx_len = 0x001b;
2480 hdev->le_max_tx_time = 0x0148;
2481 hdev->le_max_rx_len = 0x001b;
2482 hdev->le_max_rx_time = 0x0148;
2483 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2484 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2485 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2486 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2487 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2488 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2489 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2490 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2491 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2492
2493 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2494 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2495 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2496 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2497 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2498 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2499
2500 	/* Default 1.28 sec page scan (0x0800 slots of 0.625 ms = 1.28 s) */
2501 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2502 hdev->def_page_scan_int = 0x0800;
2503 hdev->def_page_scan_window = 0x0012;
2504
2505 mutex_init(&hdev->lock);
2506 mutex_init(&hdev->req_lock);
2507
2508 INIT_LIST_HEAD(&hdev->mesh_pending);
2509 INIT_LIST_HEAD(&hdev->mgmt_pending);
2510 INIT_LIST_HEAD(&hdev->reject_list);
2511 INIT_LIST_HEAD(&hdev->accept_list);
2512 INIT_LIST_HEAD(&hdev->uuids);
2513 INIT_LIST_HEAD(&hdev->link_keys);
2514 INIT_LIST_HEAD(&hdev->long_term_keys);
2515 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2516 INIT_LIST_HEAD(&hdev->remote_oob_data);
2517 INIT_LIST_HEAD(&hdev->le_accept_list);
2518 INIT_LIST_HEAD(&hdev->le_resolv_list);
2519 INIT_LIST_HEAD(&hdev->le_conn_params);
2520 INIT_LIST_HEAD(&hdev->pend_le_conns);
2521 INIT_LIST_HEAD(&hdev->pend_le_reports);
2522 INIT_LIST_HEAD(&hdev->conn_hash.list);
2523 INIT_LIST_HEAD(&hdev->adv_instances);
2524 INIT_LIST_HEAD(&hdev->blocked_keys);
2525 INIT_LIST_HEAD(&hdev->monitored_devices);
2526
2527 INIT_LIST_HEAD(&hdev->local_codecs);
2528 INIT_WORK(&hdev->rx_work, hci_rx_work);
2529 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2530 INIT_WORK(&hdev->tx_work, hci_tx_work);
2531 INIT_WORK(&hdev->power_on, hci_power_on);
2532 INIT_WORK(&hdev->error_reset, hci_error_reset);
2533
2534 hci_cmd_sync_init(hdev);
2535
2536 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2537
2538 skb_queue_head_init(&hdev->rx_q);
2539 skb_queue_head_init(&hdev->cmd_q);
2540 skb_queue_head_init(&hdev->raw_q);
2541
2542 init_waitqueue_head(&hdev->req_wait_q);
2543
2544 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2545 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2546
2547 hci_request_setup(hdev);
2548
2549 hci_init_sysfs(hdev);
2550 discovery_init(hdev);
2551
2552 return hdev;
2553 }
2554 EXPORT_SYMBOL(hci_alloc_dev_priv);
2555
2556 /* Free HCI device */
2557 void hci_free_dev(struct hci_dev *hdev)
2558 {
2559 /* will free via device release */
2560 put_device(&hdev->dev);
2561 }
2562 EXPORT_SYMBOL(hci_free_dev);
2563
2564 /* Register HCI device */
2565 int hci_register_dev(struct hci_dev *hdev)
2566 {
2567 int id, error;
2568
2569 if (!hdev->open || !hdev->close || !hdev->send)
2570 return -EINVAL;
2571
2572 /* Do not allow HCI_AMP devices to register at index 0,
2573 * so the index can be used as the AMP controller ID.
2574 */
2575 switch (hdev->dev_type) {
2576 case HCI_PRIMARY:
2577 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2578 break;
2579 case HCI_AMP:
2580 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2581 break;
2582 default:
2583 return -EINVAL;
2584 }
2585
2586 if (id < 0)
2587 return id;
2588
2589 snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
2590 hdev->id = id;
2591
2592 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2593
2594 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2595 if (!hdev->workqueue) {
2596 error = -ENOMEM;
2597 goto err;
2598 }
2599
2600 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2601 hdev->name);
2602 if (!hdev->req_workqueue) {
2603 destroy_workqueue(hdev->workqueue);
2604 error = -ENOMEM;
2605 goto err;
2606 }
2607
2608 if (!IS_ERR_OR_NULL(bt_debugfs))
2609 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2610
2611 dev_set_name(&hdev->dev, "%s", hdev->name);
2612
2613 error = device_add(&hdev->dev);
2614 if (error < 0)
2615 goto err_wqueue;
2616
2617 hci_leds_init(hdev);
2618
2619 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2620 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2621 hdev);
2622 if (hdev->rfkill) {
2623 if (rfkill_register(hdev->rfkill) < 0) {
2624 rfkill_destroy(hdev->rfkill);
2625 hdev->rfkill = NULL;
2626 }
2627 }
2628
2629 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2630 hci_dev_set_flag(hdev, HCI_RFKILLED);
2631
2632 hci_dev_set_flag(hdev, HCI_SETUP);
2633 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2634
2635 if (hdev->dev_type == HCI_PRIMARY) {
2636 /* Assume BR/EDR support until proven otherwise (such as
2637 		 * through reading supported features during init).
2638 */
2639 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2640 }
2641
2642 write_lock(&hci_dev_list_lock);
2643 list_add(&hdev->list, &hci_dev_list);
2644 write_unlock(&hci_dev_list_lock);
2645
2646 /* Devices that are marked for raw-only usage are unconfigured
2647 * and should not be included in normal operation.
2648 */
2649 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2650 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2651
2652 /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2653 * callback.
2654 */
2655 if (hdev->wakeup)
2656 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2657
2658 hci_sock_dev_event(hdev, HCI_DEV_REG);
2659 hci_dev_hold(hdev);
2660
2661 error = hci_register_suspend_notifier(hdev);
2662 if (error)
2663 BT_WARN("register suspend notifier failed error:%d\n", error);
2664
2665 queue_work(hdev->req_workqueue, &hdev->power_on);
2666
2667 idr_init(&hdev->adv_monitors_idr);
2668 msft_register(hdev);
2669
2670 return id;
2671
2672 err_wqueue:
2673 debugfs_remove_recursive(hdev->debugfs);
2674 destroy_workqueue(hdev->workqueue);
2675 destroy_workqueue(hdev->req_workqueue);
2676 err:
2677 ida_simple_remove(&hci_index_ida, hdev->id);
2678
2679 return error;
2680 }
2681 EXPORT_SYMBOL(hci_register_dev);
2682
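/* Registration sketch (illustrative): a transport driver allocates the
 * device, wires up the mandatory callbacks and registers it. The
 * my_drv_open/close/send callbacks are assumed to be provided by the driver
 * and are not part of the core:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_drv_open;
 *	hdev->close = my_drv_close;
 *	hdev->send  = my_drv_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * On success the non-negative return value is the index used for the
 * "hciX" name.
 */
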
2683 /* Unregister HCI device */
2684 void hci_unregister_dev(struct hci_dev *hdev)
2685 {
2686 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2687
2688 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2689
2690 write_lock(&hci_dev_list_lock);
2691 list_del(&hdev->list);
2692 write_unlock(&hci_dev_list_lock);
2693
2694 cancel_work_sync(&hdev->power_on);
2695
2696 hci_cmd_sync_clear(hdev);
2697
2698 hci_unregister_suspend_notifier(hdev);
2699
2700 msft_unregister(hdev);
2701
2702 hci_dev_do_close(hdev);
2703
2704 if (!test_bit(HCI_INIT, &hdev->flags) &&
2705 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2706 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2707 hci_dev_lock(hdev);
2708 mgmt_index_removed(hdev);
2709 hci_dev_unlock(hdev);
2710 }
2711
2712 /* mgmt_index_removed should take care of emptying the
2713 * pending list */
2714 BUG_ON(!list_empty(&hdev->mgmt_pending));
2715
2716 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2717
2718 if (hdev->rfkill) {
2719 rfkill_unregister(hdev->rfkill);
2720 rfkill_destroy(hdev->rfkill);
2721 }
2722
2723 device_del(&hdev->dev);
2724 /* Actual cleanup is deferred until hci_release_dev(). */
2725 hci_dev_put(hdev);
2726 }
2727 EXPORT_SYMBOL(hci_unregister_dev);
2728
2729 /* Release HCI device */
2730 void hci_release_dev(struct hci_dev *hdev)
2731 {
2732 debugfs_remove_recursive(hdev->debugfs);
2733 kfree_const(hdev->hw_info);
2734 kfree_const(hdev->fw_info);
2735
2736 destroy_workqueue(hdev->workqueue);
2737 destroy_workqueue(hdev->req_workqueue);
2738
2739 hci_dev_lock(hdev);
2740 hci_bdaddr_list_clear(&hdev->reject_list);
2741 hci_bdaddr_list_clear(&hdev->accept_list);
2742 hci_uuids_clear(hdev);
2743 hci_link_keys_clear(hdev);
2744 hci_smp_ltks_clear(hdev);
2745 hci_smp_irks_clear(hdev);
2746 hci_remote_oob_data_clear(hdev);
2747 hci_adv_instances_clear(hdev);
2748 hci_adv_monitors_clear(hdev);
2749 hci_bdaddr_list_clear(&hdev->le_accept_list);
2750 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2751 hci_conn_params_clear_all(hdev);
2752 hci_discovery_filter_clear(hdev);
2753 hci_blocked_keys_clear(hdev);
2754 hci_dev_unlock(hdev);
2755
2756 ida_simple_remove(&hci_index_ida, hdev->id);
2757 kfree_skb(hdev->sent_cmd);
2758 kfree_skb(hdev->recv_event);
2759 kfree(hdev);
2760 }
2761 EXPORT_SYMBOL(hci_release_dev);
2762
2763 int hci_register_suspend_notifier(struct hci_dev *hdev)
2764 {
2765 int ret = 0;
2766
2767 if (!hdev->suspend_notifier.notifier_call &&
2768 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2769 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2770 ret = register_pm_notifier(&hdev->suspend_notifier);
2771 }
2772
2773 return ret;
2774 }
2775
2776 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2777 {
2778 int ret = 0;
2779
2780 if (hdev->suspend_notifier.notifier_call) {
2781 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2782 if (!ret)
2783 hdev->suspend_notifier.notifier_call = NULL;
2784 }
2785
2786 return ret;
2787 }
2788
2789 /* Suspend HCI device */
2790 int hci_suspend_dev(struct hci_dev *hdev)
2791 {
2792 int ret;
2793
2794 bt_dev_dbg(hdev, "");
2795
2796 	/* Suspend should only act when the device is powered. */
2797 if (!hdev_is_powered(hdev) ||
2798 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2799 return 0;
2800
2801 /* If powering down don't attempt to suspend */
2802 if (mgmt_powering_down(hdev))
2803 return 0;
2804
2805 hci_req_sync_lock(hdev);
2806 ret = hci_suspend_sync(hdev);
2807 hci_req_sync_unlock(hdev);
2808
2809 hci_clear_wake_reason(hdev);
2810 mgmt_suspending(hdev, hdev->suspend_state);
2811
2812 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2813 return ret;
2814 }
2815 EXPORT_SYMBOL(hci_suspend_dev);
2816
2817 /* Resume HCI device */
2818 int hci_resume_dev(struct hci_dev *hdev)
2819 {
2820 int ret;
2821
2822 bt_dev_dbg(hdev, "");
2823
2824 	/* Resume should only act when the device is powered. */
2825 if (!hdev_is_powered(hdev) ||
2826 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2827 return 0;
2828
2829 /* If powering down don't attempt to resume */
2830 if (mgmt_powering_down(hdev))
2831 return 0;
2832
2833 hci_req_sync_lock(hdev);
2834 ret = hci_resume_sync(hdev);
2835 hci_req_sync_unlock(hdev);
2836
2837 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2838 hdev->wake_addr_type);
2839
2840 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2841 return ret;
2842 }
2843 EXPORT_SYMBOL(hci_resume_dev);
2844
2845 /* Reset HCI device */
2846 int hci_reset_dev(struct hci_dev *hdev)
2847 {
2848 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2849 struct sk_buff *skb;
2850
2851 skb = bt_skb_alloc(3, GFP_ATOMIC);
2852 if (!skb)
2853 return -ENOMEM;
2854
2855 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2856 skb_put_data(skb, hw_err, 3);
2857
2858 bt_dev_err(hdev, "Injecting HCI hardware error event");
2859
2860 /* Send Hardware Error to upper stack */
2861 return hci_recv_frame(hdev, skb);
2862 }
2863 EXPORT_SYMBOL(hci_reset_dev);
2864
2865 /* Receive frame from HCI drivers */
2866 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2867 {
2868 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2869 && !test_bit(HCI_INIT, &hdev->flags))) {
2870 kfree_skb(skb);
2871 return -ENXIO;
2872 }
2873
2874 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
2875 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
2876 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
2877 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
2878 kfree_skb(skb);
2879 return -EINVAL;
2880 }
2881
2882 /* Incoming skb */
2883 bt_cb(skb)->incoming = 1;
2884
2885 /* Time stamp */
2886 __net_timestamp(skb);
2887
2888 skb_queue_tail(&hdev->rx_q, skb);
2889 queue_work(hdev->workqueue, &hdev->rx_work);
2890
2891 return 0;
2892 }
2893 EXPORT_SYMBOL(hci_recv_frame);
2894
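/* Receive-path sketch (illustrative): a driver that has reassembled a
 * complete HCI event from its transport hands it to the core like this
 * (buf and len are assumed to come from the driver):
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, len);
 *
 *	return hci_recv_frame(hdev, skb);
 *
 * hci_recv_frame() takes ownership of the skb and frees it itself on error.
 */
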
2895 /* Receive diagnostic message from HCI drivers */
2896 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2897 {
2898 /* Mark as diagnostic packet */
2899 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2900
2901 /* Time stamp */
2902 __net_timestamp(skb);
2903
2904 skb_queue_tail(&hdev->rx_q, skb);
2905 queue_work(hdev->workqueue, &hdev->rx_work);
2906
2907 return 0;
2908 }
2909 EXPORT_SYMBOL(hci_recv_diag);
2910
2911 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2912 {
2913 va_list vargs;
2914
2915 va_start(vargs, fmt);
2916 kfree_const(hdev->hw_info);
2917 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2918 va_end(vargs);
2919 }
2920 EXPORT_SYMBOL(hci_set_hw_info);
2921
2922 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2923 {
2924 va_list vargs;
2925
2926 va_start(vargs, fmt);
2927 kfree_const(hdev->fw_info);
2928 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2929 va_end(vargs);
2930 }
2931 EXPORT_SYMBOL(hci_set_fw_info);
2932
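/* Usage sketch (illustrative): drivers record hardware/firmware revision
 * strings during setup; the format strings and variables below are
 * assumptions, not a fixed convention:
 *
 *	hci_set_hw_info(hdev, "rev %u", hw_rev);
 *	hci_set_fw_info(hdev, "build %u week %u %u", build, week, year);
 */
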
2933 /* ---- Interface to upper protocols ---- */
2934
2935 int hci_register_cb(struct hci_cb *cb)
2936 {
2937 BT_DBG("%p name %s", cb, cb->name);
2938
2939 mutex_lock(&hci_cb_list_lock);
2940 list_add_tail(&cb->list, &hci_cb_list);
2941 mutex_unlock(&hci_cb_list_lock);
2942
2943 return 0;
2944 }
2945 EXPORT_SYMBOL(hci_register_cb);
2946
2947 int hci_unregister_cb(struct hci_cb *cb)
2948 {
2949 BT_DBG("%p name %s", cb, cb->name);
2950
2951 mutex_lock(&hci_cb_list_lock);
2952 list_del(&cb->list);
2953 mutex_unlock(&hci_cb_list_lock);
2954
2955 return 0;
2956 }
2957 EXPORT_SYMBOL(hci_unregister_cb);
2958
2959 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2960 {
2961 int err;
2962
2963 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
2964 skb->len);
2965
2966 /* Time stamp */
2967 __net_timestamp(skb);
2968
2969 /* Send copy to monitor */
2970 hci_send_to_monitor(hdev, skb);
2971
2972 if (atomic_read(&hdev->promisc)) {
2973 /* Send copy to the sockets */
2974 hci_send_to_sock(hdev, skb);
2975 }
2976
2977 /* Get rid of skb owner, prior to sending to the driver. */
2978 skb_orphan(skb);
2979
2980 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
2981 kfree_skb(skb);
2982 return -EINVAL;
2983 }
2984
2985 err = hdev->send(hdev, skb);
2986 if (err < 0) {
2987 bt_dev_err(hdev, "sending frame failed (%d)", err);
2988 kfree_skb(skb);
2989 return err;
2990 }
2991
2992 return 0;
2993 }
2994
2995 /* Send HCI command */
2996 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2997 const void *param)
2998 {
2999 struct sk_buff *skb;
3000
3001 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3002
3003 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3004 if (!skb) {
3005 bt_dev_err(hdev, "no memory for command");
3006 return -ENOMEM;
3007 }
3008
3009 /* Stand-alone HCI commands must be flagged as
3010 * single-command requests.
3011 */
3012 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3013
3014 skb_queue_tail(&hdev->cmd_q, skb);
3015 queue_work(hdev->workqueue, &hdev->cmd_work);
3016
3017 return 0;
3018 }
3019
3020 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3021 const void *param)
3022 {
3023 struct sk_buff *skb;
3024
3025 if (hci_opcode_ogf(opcode) != 0x3f) {
3026 /* A controller receiving a command shall respond with either
3027 * a Command Status Event or a Command Complete Event.
3028 * Therefore, all standard HCI commands must be sent via the
3029 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3030 * Some vendors do not comply with this rule for vendor-specific
3031 * commands and do not return any event. We want to support
3032 * unresponded commands for such cases only.
3033 */
3034 bt_dev_err(hdev, "unresponded command not supported");
3035 return -EINVAL;
3036 }
3037
3038 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3039 if (!skb) {
3040 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3041 opcode);
3042 return -ENOMEM;
3043 }
3044
3045 hci_send_frame(hdev, skb);
3046
3047 return 0;
3048 }
3049 EXPORT_SYMBOL(__hci_cmd_send);
3050
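/* Usage sketch (illustrative): __hci_cmd_send() is only for vendor commands
 * (OGF 0x3f) that the controller answers with no event at all. The OCF
 * 0x0001 and the parameter below are made-up placeholders:
 *
 *	static const u8 param[] = { 0x01 };
 *
 *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			     sizeof(param), param);
 *
 * Commands that do generate Command Status/Complete events must use
 * hci_send_cmd() or the hci_cmd_sync helpers instead.
 */
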
3051 /* Get data from the previously sent command */
3052 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3053 {
3054 struct hci_command_hdr *hdr;
3055
3056 if (!hdev->sent_cmd)
3057 return NULL;
3058
3059 hdr = (void *) hdev->sent_cmd->data;
3060
3061 if (hdr->opcode != cpu_to_le16(opcode))
3062 return NULL;
3063
3064 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3065
3066 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3067 }
3068
3069 /* Get data from last received event */
3070 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3071 {
3072 struct hci_event_hdr *hdr;
3073 int offset;
3074
3075 if (!hdev->recv_event)
3076 return NULL;
3077
3078 hdr = (void *)hdev->recv_event->data;
3079 offset = sizeof(*hdr);
3080
3081 if (hdr->evt != event) {
3082 		/* In case of an LE Meta event, check whether the subevent matches */
3083 if (hdr->evt == HCI_EV_LE_META) {
3084 struct hci_ev_le_meta *ev;
3085
3086 ev = (void *)hdev->recv_event->data + offset;
3087 offset += sizeof(*ev);
3088 if (ev->subevent == event)
3089 goto found;
3090 }
3091 return NULL;
3092 }
3093
3094 found:
3095 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3096
3097 return hdev->recv_event->data + offset;
3098 }
3099
3100 /* Send ACL data */
3101 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3102 {
3103 struct hci_acl_hdr *hdr;
3104 int len = skb->len;
3105
3106 skb_push(skb, HCI_ACL_HDR_SIZE);
3107 skb_reset_transport_header(skb);
3108 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3109 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3110 hdr->dlen = cpu_to_le16(len);
3111 }
3112
3113 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3114 struct sk_buff *skb, __u16 flags)
3115 {
3116 struct hci_conn *conn = chan->conn;
3117 struct hci_dev *hdev = conn->hdev;
3118 struct sk_buff *list;
3119
3120 skb->len = skb_headlen(skb);
3121 skb->data_len = 0;
3122
3123 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3124
3125 switch (hdev->dev_type) {
3126 case HCI_PRIMARY:
3127 hci_add_acl_hdr(skb, conn->handle, flags);
3128 break;
3129 case HCI_AMP:
3130 hci_add_acl_hdr(skb, chan->handle, flags);
3131 break;
3132 default:
3133 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3134 return;
3135 }
3136
3137 list = skb_shinfo(skb)->frag_list;
3138 if (!list) {
3139 /* Non fragmented */
3140 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3141
3142 skb_queue_tail(queue, skb);
3143 } else {
3144 /* Fragmented */
3145 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3146
3147 skb_shinfo(skb)->frag_list = NULL;
3148
3149 /* Queue all fragments atomically. We need to use spin_lock_bh
3150 * here because of 6LoWPAN links, as there this function is
3151 * called from softirq and using normal spin lock could cause
3152 * deadlocks.
3153 */
3154 spin_lock_bh(&queue->lock);
3155
3156 __skb_queue_tail(queue, skb);
3157
3158 flags &= ~ACL_START;
3159 flags |= ACL_CONT;
3160 do {
3161 skb = list; list = list->next;
3162
3163 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3164 hci_add_acl_hdr(skb, conn->handle, flags);
3165
3166 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3167
3168 __skb_queue_tail(queue, skb);
3169 } while (list);
3170
3171 spin_unlock_bh(&queue->lock);
3172 }
3173 }
3174
3175 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3176 {
3177 struct hci_dev *hdev = chan->conn->hdev;
3178
3179 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3180
3181 hci_queue_acl(chan, &chan->data_q, skb, flags);
3182
3183 queue_work(hdev->workqueue, &hdev->tx_work);
3184 }
3185
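/* Usage sketch (illustrative): a caller such as L2CAP submits an skb whose
 * continuation fragments, if any, hang off skb_shinfo(skb)->frag_list,
 * together with the packet boundary flag:
 *
 *	hci_send_acl(chan, skb, ACL_START_NO_FLUSH);
 *
 * hci_queue_acl() above re-tags the continuation fragments with ACL_CONT
 * before they are queued.
 */
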
3186 /* Send SCO data */
3187 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3188 {
3189 struct hci_dev *hdev = conn->hdev;
3190 struct hci_sco_hdr hdr;
3191
3192 BT_DBG("%s len %d", hdev->name, skb->len);
3193
3194 hdr.handle = cpu_to_le16(conn->handle);
3195 hdr.dlen = skb->len;
3196
3197 skb_push(skb, HCI_SCO_HDR_SIZE);
3198 skb_reset_transport_header(skb);
3199 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3200
3201 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3202
3203 skb_queue_tail(&conn->data_q, skb);
3204 queue_work(hdev->workqueue, &hdev->tx_work);
3205 }
3206
3207 /* Send ISO data */
3208 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3209 {
3210 struct hci_iso_hdr *hdr;
3211 int len = skb->len;
3212
3213 skb_push(skb, HCI_ISO_HDR_SIZE);
3214 skb_reset_transport_header(skb);
3215 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3216 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3217 hdr->dlen = cpu_to_le16(len);
3218 }
3219
3220 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3221 struct sk_buff *skb)
3222 {
3223 struct hci_dev *hdev = conn->hdev;
3224 struct sk_buff *list;
3225 __u16 flags;
3226
3227 skb->len = skb_headlen(skb);
3228 skb->data_len = 0;
3229
3230 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3231
3232 list = skb_shinfo(skb)->frag_list;
3233
3234 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3235 hci_add_iso_hdr(skb, conn->handle, flags);
3236
3237 if (!list) {
3238 /* Non fragmented */
3239 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3240
3241 skb_queue_tail(queue, skb);
3242 } else {
3243 /* Fragmented */
3244 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3245
3246 skb_shinfo(skb)->frag_list = NULL;
3247
3248 __skb_queue_tail(queue, skb);
3249
3250 do {
3251 skb = list; list = list->next;
3252
3253 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3254 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3255 0x00);
3256 hci_add_iso_hdr(skb, conn->handle, flags);
3257
3258 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3259
3260 __skb_queue_tail(queue, skb);
3261 } while (list);
3262 }
3263 }
3264
3265 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3266 {
3267 struct hci_dev *hdev = conn->hdev;
3268
3269 BT_DBG("%s len %d", hdev->name, skb->len);
3270
3271 hci_queue_iso(conn, &conn->data_q, skb);
3272
3273 queue_work(hdev->workqueue, &hdev->tx_work);
3274 }
3275
3276 /* ---- HCI TX task (outgoing data) ---- */
3277
3278 /* HCI Connection scheduler */
3279 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3280 {
3281 struct hci_dev *hdev;
3282 int cnt, q;
3283
3284 if (!conn) {
3285 *quote = 0;
3286 return;
3287 }
3288
3289 hdev = conn->hdev;
3290
3291 switch (conn->type) {
3292 case ACL_LINK:
3293 cnt = hdev->acl_cnt;
3294 break;
3295 case AMP_LINK:
3296 cnt = hdev->block_cnt;
3297 break;
3298 case SCO_LINK:
3299 case ESCO_LINK:
3300 cnt = hdev->sco_cnt;
3301 break;
3302 case LE_LINK:
3303 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3304 break;
3305 case ISO_LINK:
3306 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3307 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3308 break;
3309 default:
3310 cnt = 0;
3311 bt_dev_err(hdev, "unknown link type %d", conn->type);
3312 }
3313
3314 q = cnt / num;
3315 *quote = q ? q : 1;
3316 }
3317
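/* Worked example: with hdev->acl_cnt == 7 free ACL buffers shared by
 * num == 3 busy ACL connections, 7 / 3 gives each connection a quote of 2
 * packets per scheduling round; when the division yields 0, the quote is
 * clamped to 1 so no connection is starved completely.
 */
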
3318 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3319 int *quote)
3320 {
3321 struct hci_conn_hash *h = &hdev->conn_hash;
3322 struct hci_conn *conn = NULL, *c;
3323 unsigned int num = 0, min = ~0;
3324
3325 /* We don't have to lock device here. Connections are always
3326 * added and removed with TX task disabled. */
3327
3328 rcu_read_lock();
3329
3330 list_for_each_entry_rcu(c, &h->list, list) {
3331 if (c->type != type || skb_queue_empty(&c->data_q))
3332 continue;
3333
3334 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3335 continue;
3336
3337 num++;
3338
3339 if (c->sent < min) {
3340 min = c->sent;
3341 conn = c;
3342 }
3343
3344 if (hci_conn_num(hdev, type) == num)
3345 break;
3346 }
3347
3348 rcu_read_unlock();
3349
3350 hci_quote_sent(conn, num, quote);
3351
3352 BT_DBG("conn %p quote %d", conn, *quote);
3353 return conn;
3354 }
3355
3356 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3357 {
3358 struct hci_conn_hash *h = &hdev->conn_hash;
3359 struct hci_conn *c;
3360
3361 bt_dev_err(hdev, "link tx timeout");
3362
3363 rcu_read_lock();
3364
3365 /* Kill stalled connections */
3366 list_for_each_entry_rcu(c, &h->list, list) {
3367 if (c->type == type && c->sent) {
3368 bt_dev_err(hdev, "killing stalled connection %pMR",
3369 &c->dst);
3370 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3371 }
3372 }
3373
3374 rcu_read_unlock();
3375 }
3376
3377 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3378 int *quote)
3379 {
3380 struct hci_conn_hash *h = &hdev->conn_hash;
3381 struct hci_chan *chan = NULL;
3382 unsigned int num = 0, min = ~0, cur_prio = 0;
3383 struct hci_conn *conn;
3384 int conn_num = 0;
3385
3386 BT_DBG("%s", hdev->name);
3387
3388 rcu_read_lock();
3389
3390 list_for_each_entry_rcu(conn, &h->list, list) {
3391 struct hci_chan *tmp;
3392
3393 if (conn->type != type)
3394 continue;
3395
3396 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3397 continue;
3398
3399 conn_num++;
3400
3401 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3402 struct sk_buff *skb;
3403
3404 if (skb_queue_empty(&tmp->data_q))
3405 continue;
3406
3407 skb = skb_peek(&tmp->data_q);
3408 if (skb->priority < cur_prio)
3409 continue;
3410
3411 if (skb->priority > cur_prio) {
3412 num = 0;
3413 min = ~0;
3414 cur_prio = skb->priority;
3415 }
3416
3417 num++;
3418
3419 if (conn->sent < min) {
3420 min = conn->sent;
3421 chan = tmp;
3422 }
3423 }
3424
3425 if (hci_conn_num(hdev, type) == conn_num)
3426 break;
3427 }
3428
3429 rcu_read_unlock();
3430
3431 if (!chan)
3432 return NULL;
3433
3434 hci_quote_sent(chan->conn, num, quote);
3435
3436 BT_DBG("chan %p quote %d", chan, *quote);
3437 return chan;
3438 }
3439
3440 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3441 {
3442 struct hci_conn_hash *h = &hdev->conn_hash;
3443 struct hci_conn *conn;
3444 int num = 0;
3445
3446 BT_DBG("%s", hdev->name);
3447
3448 rcu_read_lock();
3449
3450 list_for_each_entry_rcu(conn, &h->list, list) {
3451 struct hci_chan *chan;
3452
3453 if (conn->type != type)
3454 continue;
3455
3456 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3457 continue;
3458
3459 num++;
3460
3461 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3462 struct sk_buff *skb;
3463
3464 if (chan->sent) {
3465 chan->sent = 0;
3466 continue;
3467 }
3468
3469 if (skb_queue_empty(&chan->data_q))
3470 continue;
3471
3472 skb = skb_peek(&chan->data_q);
3473 if (skb->priority >= HCI_PRIO_MAX - 1)
3474 continue;
3475
3476 skb->priority = HCI_PRIO_MAX - 1;
3477
3478 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3479 skb->priority);
3480 }
3481
3482 if (hci_conn_num(hdev, type) == num)
3483 break;
3484 }
3485
3486 rcu_read_unlock();
3487
3488 }
3489
3490 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3491 {
3492 /* Calculate count of blocks used by this packet */
3493 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3494 }
3495
3496 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3497 {
3498 unsigned long last_tx;
3499
3500 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3501 return;
3502
3503 switch (type) {
3504 case LE_LINK:
3505 last_tx = hdev->le_last_tx;
3506 break;
3507 default:
3508 last_tx = hdev->acl_last_tx;
3509 break;
3510 }
3511
3512 /* tx timeout must be longer than maximum link supervision timeout
3513 * (40.9 seconds)
3514 */
3515 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3516 hci_link_tx_to(hdev, type);
3517 }
3518
3519 /* Schedule SCO */
3520 static void hci_sched_sco(struct hci_dev *hdev)
3521 {
3522 struct hci_conn *conn;
3523 struct sk_buff *skb;
3524 int quote;
3525
3526 BT_DBG("%s", hdev->name);
3527
3528 if (!hci_conn_num(hdev, SCO_LINK))
3529 return;
3530
3531 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3532 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3533 BT_DBG("skb %p len %d", skb, skb->len);
3534 hci_send_frame(hdev, skb);
3535
3536 conn->sent++;
3537 if (conn->sent == ~0)
3538 conn->sent = 0;
3539 }
3540 }
3541 }
3542
3543 static void hci_sched_esco(struct hci_dev *hdev)
3544 {
3545 struct hci_conn *conn;
3546 struct sk_buff *skb;
3547 int quote;
3548
3549 BT_DBG("%s", hdev->name);
3550
3551 if (!hci_conn_num(hdev, ESCO_LINK))
3552 return;
3553
3554 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3555 						     &quote))) {
3556 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3557 BT_DBG("skb %p len %d", skb, skb->len);
3558 hci_send_frame(hdev, skb);
3559
3560 conn->sent++;
3561 if (conn->sent == ~0)
3562 conn->sent = 0;
3563 }
3564 }
3565 }
3566
3567 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3568 {
3569 unsigned int cnt = hdev->acl_cnt;
3570 struct hci_chan *chan;
3571 struct sk_buff *skb;
3572 int quote;
3573
3574 __check_timeout(hdev, cnt, ACL_LINK);
3575
3576 while (hdev->acl_cnt &&
3577 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3578 u32 priority = (skb_peek(&chan->data_q))->priority;
3579 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3580 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3581 skb->len, skb->priority);
3582
3583 /* Stop if priority has changed */
3584 if (skb->priority < priority)
3585 break;
3586
3587 skb = skb_dequeue(&chan->data_q);
3588
3589 hci_conn_enter_active_mode(chan->conn,
3590 bt_cb(skb)->force_active);
3591
3592 hci_send_frame(hdev, skb);
3593 hdev->acl_last_tx = jiffies;
3594
3595 hdev->acl_cnt--;
3596 chan->sent++;
3597 chan->conn->sent++;
3598
3599 /* Send pending SCO packets right away */
3600 hci_sched_sco(hdev);
3601 hci_sched_esco(hdev);
3602 }
3603 }
3604
3605 if (cnt != hdev->acl_cnt)
3606 hci_prio_recalculate(hdev, ACL_LINK);
3607 }
3608
3609 static void hci_sched_acl_blk(struct hci_dev *hdev)
3610 {
3611 unsigned int cnt = hdev->block_cnt;
3612 struct hci_chan *chan;
3613 struct sk_buff *skb;
3614 int quote;
3615 u8 type;
3616
3617 BT_DBG("%s", hdev->name);
3618
3619 if (hdev->dev_type == HCI_AMP)
3620 type = AMP_LINK;
3621 else
3622 type = ACL_LINK;
3623
3624 __check_timeout(hdev, cnt, type);
3625
3626 while (hdev->block_cnt > 0 &&
3627 	       (chan = hci_chan_sent(hdev, type, &quote))) {
3628 u32 priority = (skb_peek(&chan->data_q))->priority;
3629 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3630 int blocks;
3631
3632 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3633 skb->len, skb->priority);
3634
3635 /* Stop if priority has changed */
3636 if (skb->priority < priority)
3637 break;
3638
3639 skb = skb_dequeue(&chan->data_q);
3640
3641 blocks = __get_blocks(hdev, skb);
3642 if (blocks > hdev->block_cnt)
3643 return;
3644
3645 hci_conn_enter_active_mode(chan->conn,
3646 bt_cb(skb)->force_active);
3647
3648 hci_send_frame(hdev, skb);
3649 hdev->acl_last_tx = jiffies;
3650
3651 hdev->block_cnt -= blocks;
3652 quote -= blocks;
3653
3654 chan->sent += blocks;
3655 chan->conn->sent += blocks;
3656 }
3657 }
3658
3659 if (cnt != hdev->block_cnt)
3660 hci_prio_recalculate(hdev, type);
3661 }
3662
3663 static void hci_sched_acl(struct hci_dev *hdev)
3664 {
3665 BT_DBG("%s", hdev->name);
3666
3667 	/* Nothing to schedule if a BR/EDR (primary) controller has no ACL links */
3668 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3669 return;
3670
3671 	/* Nothing to schedule if an AMP controller has no AMP links */
3672 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3673 return;
3674
3675 switch (hdev->flow_ctl_mode) {
3676 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3677 hci_sched_acl_pkt(hdev);
3678 break;
3679
3680 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3681 hci_sched_acl_blk(hdev);
3682 break;
3683 }
3684 }
3685
3686 static void hci_sched_le(struct hci_dev *hdev)
3687 {
3688 struct hci_chan *chan;
3689 struct sk_buff *skb;
3690 int quote, cnt, tmp;
3691
3692 BT_DBG("%s", hdev->name);
3693
3694 if (!hci_conn_num(hdev, LE_LINK))
3695 return;
3696
3697 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3698
3699 __check_timeout(hdev, cnt, LE_LINK);
3700
3701 tmp = cnt;
3702 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3703 u32 priority = (skb_peek(&chan->data_q))->priority;
3704 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3705 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3706 skb->len, skb->priority);
3707
3708 /* Stop if priority has changed */
3709 if (skb->priority < priority)
3710 break;
3711
3712 skb = skb_dequeue(&chan->data_q);
3713
3714 hci_send_frame(hdev, skb);
3715 hdev->le_last_tx = jiffies;
3716
3717 cnt--;
3718 chan->sent++;
3719 chan->conn->sent++;
3720
3721 /* Send pending SCO packets right away */
3722 hci_sched_sco(hdev);
3723 hci_sched_esco(hdev);
3724 }
3725 }
3726
3727 if (hdev->le_pkts)
3728 hdev->le_cnt = cnt;
3729 else
3730 hdev->acl_cnt = cnt;
3731
3732 if (cnt != tmp)
3733 hci_prio_recalculate(hdev, LE_LINK);
3734 }
3735
3736 /* Schedule CIS */
3737 static void hci_sched_iso(struct hci_dev *hdev)
3738 {
3739 struct hci_conn *conn;
3740 struct sk_buff *skb;
3741 int quote, *cnt;
3742
3743 BT_DBG("%s", hdev->name);
3744
3745 if (!hci_conn_num(hdev, ISO_LINK))
3746 return;
3747
3748 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3749 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3750 	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3751 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3752 BT_DBG("skb %p len %d", skb, skb->len);
3753 hci_send_frame(hdev, skb);
3754
3755 conn->sent++;
3756 if (conn->sent == ~0)
3757 conn->sent = 0;
3758 (*cnt)--;
3759 }
3760 }
3761 }
3762
3763 static void hci_tx_work(struct work_struct *work)
3764 {
3765 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3766 struct sk_buff *skb;
3767
3768 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3769 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3770
3771 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3772 /* Schedule queues and send stuff to HCI driver */
3773 hci_sched_sco(hdev);
3774 hci_sched_esco(hdev);
3775 hci_sched_iso(hdev);
3776 hci_sched_acl(hdev);
3777 hci_sched_le(hdev);
3778 }
3779
3780 /* Send next queued raw (unknown type) packet */
3781 while ((skb = skb_dequeue(&hdev->raw_q)))
3782 hci_send_frame(hdev, skb);
3783 }
3784
3785 /* ----- HCI RX task (incoming data processing) ----- */
3786
3787 /* ACL data packet */
3788 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3789 {
3790 struct hci_acl_hdr *hdr = (void *) skb->data;
3791 struct hci_conn *conn;
3792 __u16 handle, flags;
3793
3794 skb_pull(skb, HCI_ACL_HDR_SIZE);
3795
3796 handle = __le16_to_cpu(hdr->handle);
3797 flags = hci_flags(handle);
3798 handle = hci_handle(handle);
3799
3800 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3801 handle, flags);
3802
3803 hdev->stat.acl_rx++;
3804
3805 hci_dev_lock(hdev);
3806 conn = hci_conn_hash_lookup_handle(hdev, handle);
3807 hci_dev_unlock(hdev);
3808
3809 if (conn) {
3810 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3811
3812 /* Send to upper protocol */
3813 l2cap_recv_acldata(conn, skb, flags);
3814 return;
3815 } else {
3816 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3817 handle);
3818 }
3819
3820 kfree_skb(skb);
3821 }
3822
3823 /* SCO data packet */
3824 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3825 {
3826 struct hci_sco_hdr *hdr = (void *) skb->data;
3827 struct hci_conn *conn;
3828 __u16 handle, flags;
3829
3830 skb_pull(skb, HCI_SCO_HDR_SIZE);
3831
3832 handle = __le16_to_cpu(hdr->handle);
3833 flags = hci_flags(handle);
3834 handle = hci_handle(handle);
3835
3836 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3837 handle, flags);
3838
3839 hdev->stat.sco_rx++;
3840
3841 hci_dev_lock(hdev);
3842 conn = hci_conn_hash_lookup_handle(hdev, handle);
3843 hci_dev_unlock(hdev);
3844
3845 if (conn) {
3846 /* Send to upper protocol */
3847 bt_cb(skb)->sco.pkt_status = flags & 0x03;
3848 sco_recv_scodata(conn, skb);
3849 return;
3850 } else {
3851 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3852 handle);
3853 }
3854
3855 kfree_skb(skb);
3856 }
3857
3858 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3859 {
3860 struct hci_iso_hdr *hdr;
3861 struct hci_conn *conn;
3862 __u16 handle, flags;
3863
3864 hdr = skb_pull_data(skb, sizeof(*hdr));
3865 if (!hdr) {
3866 bt_dev_err(hdev, "ISO packet too small");
3867 goto drop;
3868 }
3869
3870 handle = __le16_to_cpu(hdr->handle);
3871 flags = hci_flags(handle);
3872 handle = hci_handle(handle);
3873
3874 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3875 handle, flags);
3876
3877 hci_dev_lock(hdev);
3878 conn = hci_conn_hash_lookup_handle(hdev, handle);
3879 hci_dev_unlock(hdev);
3880
3881 if (!conn) {
3882 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3883 handle);
3884 goto drop;
3885 }
3886
3887 /* Send to upper protocol */
3888 iso_recv(conn, skb, flags);
3889 return;
3890
3891 drop:
3892 kfree_skb(skb);
3893 }
3894
3895 static bool hci_req_is_complete(struct hci_dev *hdev)
3896 {
3897 struct sk_buff *skb;
3898
3899 skb = skb_peek(&hdev->cmd_q);
3900 if (!skb)
3901 return true;
3902
3903 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3904 }
3905
3906 static void hci_resend_last(struct hci_dev *hdev)
3907 {
3908 struct hci_command_hdr *sent;
3909 struct sk_buff *skb;
3910 u16 opcode;
3911
3912 if (!hdev->sent_cmd)
3913 return;
3914
3915 sent = (void *) hdev->sent_cmd->data;
3916 opcode = __le16_to_cpu(sent->opcode);
3917 if (opcode == HCI_OP_RESET)
3918 return;
3919
3920 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3921 if (!skb)
3922 return;
3923
3924 skb_queue_head(&hdev->cmd_q, skb);
3925 queue_work(hdev->workqueue, &hdev->cmd_work);
3926 }
3927
3928 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3929 hci_req_complete_t *req_complete,
3930 hci_req_complete_skb_t *req_complete_skb)
3931 {
3932 struct sk_buff *skb;
3933 unsigned long flags;
3934
3935 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3936
3937 /* If the completed command doesn't match the last one that was
3938 * sent we need to do special handling of it.
3939 */
3940 if (!hci_sent_cmd_data(hdev, opcode)) {
3941 /* Some CSR based controllers generate a spontaneous
3942 * reset complete event during init and any pending
3943 * command will never be completed. In such a case we
3944 * need to resend whatever was the last sent
3945 * command.
3946 */
3947 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3948 hci_resend_last(hdev);
3949
3950 return;
3951 }
3952
3953 /* If we reach this point this event matches the last command sent */
3954 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3955
3956 /* If the command succeeded and there's still more commands in
3957 * this request the request is not yet complete.
3958 */
3959 if (!status && !hci_req_is_complete(hdev))
3960 return;
3961
3962 /* If this was the last command in a request the complete
3963 * callback would be found in hdev->sent_cmd instead of the
3964 * command queue (hdev->cmd_q).
3965 */
3966 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
3967 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
3968 return;
3969 }
3970
3971 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
3972 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
3973 return;
3974 }
3975
3976 /* Remove all pending commands belonging to this request */
3977 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3978 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3979 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3980 __skb_queue_head(&hdev->cmd_q, skb);
3981 break;
3982 }
3983
3984 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3985 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3986 else
3987 *req_complete = bt_cb(skb)->hci.req_complete;
3988 dev_kfree_skb_irq(skb);
3989 }
3990 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3991 }
3992
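/* RX work handler: drains hdev->rx_q and dispatches each packet to the
 * monitor/sockets and then to the matching packet handler based on its HCI
 * packet type.
 */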
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions are used to collect packet parsing
	 * coverage information from this background thread and associate
	 * the coverage with the syscall's thread which originally injected
	 * the packet. This helps with fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device.
		 * While the device is in HCI_INIT, we still need to pass
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

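/* Command work handler: if the controller has command credits available,
 * dequeue the next queued HCI command, keep a clone in hdev->sent_cmd for
 * completion handling, send it to the driver and (re)arm the command
 * timeout.
 */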
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			int res;

			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);

			res = hci_send_frame(hdev, skb);
			if (res < 0)
				__hci_cmd_sync_cancel(hdev, -res);

			rcu_read_lock();
			if (test_bit(HCI_RESET, &hdev->flags) ||
			    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
						   HCI_CMD_TIMEOUT);
			rcu_read_unlock();
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
