1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
31
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
47
48 #include <linux/uaccess.h>
49 #include <asm/unaligned.h>
50
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53
54 #define AUTO_OFF_TIMEOUT 2000
55
56 static void hci_rx_work(struct work_struct *work);
57 static void hci_cmd_work(struct work_struct *work);
58 static void hci_tx_work(struct work_struct *work);
59
60 /* HCI device list */
61 LIST_HEAD(hci_dev_list);
62 DEFINE_RWLOCK(hci_dev_list_lock);
63
64 /* HCI callback list */
65 LIST_HEAD(hci_cb_list);
66 DEFINE_RWLOCK(hci_cb_list_lock);
67
68 /* ---- HCI notifications ---- */
69
70 static void hci_notify(struct hci_dev *hdev, int event)
71 {
72 hci_sock_dev_event(hdev, event);
73 }
74
75 /* ---- HCI requests ---- */
76
77 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
78 {
79 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
80
81 /* If this is the init phase check if the completed command matches
82 * the last init command, and if not just return.
83 */
84 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
85 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
86 struct sk_buff *skb;
87
88 /* Some CSR based controllers generate a spontaneous
89 * reset complete event during init and any pending
90 * command will never be completed. In such a case we
91 * need to resend whatever was the last sent
92 * command.
93 */
94
95 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
96 return;
97
98 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
99 if (skb) {
100 skb_queue_head(&hdev->cmd_q, skb);
101 queue_work(hdev->workqueue, &hdev->cmd_work);
102 }
103
104 return;
105 }
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 wake_up_interruptible(&hdev->req_wait_q);
111 }
112 }
113
114 static void hci_req_cancel(struct hci_dev *hdev, int err)
115 {
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
122 }
123 }
124
125 /* Execute request and wait for completion. */
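/* The req callback is expected to queue the needed HCI commands; the
 * caller then sleeps on req_wait_q until hci_req_complete() or
 * hci_req_cancel() updates req_status, or until the timeout expires.
 */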
126 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
127 unsigned long opt, __u32 timeout)
128 {
129 DECLARE_WAITQUEUE(wait, current);
130 int err = 0;
131
132 BT_DBG("%s start", hdev->name);
133
134 hdev->req_status = HCI_REQ_PEND;
135
136 add_wait_queue(&hdev->req_wait_q, &wait);
137 set_current_state(TASK_INTERRUPTIBLE);
138
139 req(hdev, opt);
140 schedule_timeout(timeout);
141
142 remove_wait_queue(&hdev->req_wait_q, &wait);
143
144 if (signal_pending(current))
145 return -EINTR;
146
147 switch (hdev->req_status) {
148 case HCI_REQ_DONE:
149 err = -bt_to_errno(hdev->req_result);
150 break;
151
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
154 break;
155
156 default:
157 err = -ETIMEDOUT;
158 break;
159 }
160
161 hdev->req_status = hdev->req_result = 0;
162
163 BT_DBG("%s end: err %d", hdev->name, err);
164
165 return err;
166 }
167
168 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169 unsigned long opt, __u32 timeout)
170 {
171 int ret;
172
173 if (!test_bit(HCI_UP, &hdev->flags))
174 return -ENETDOWN;
175
176 /* Serialize all requests */
177 hci_req_lock(hdev);
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
180
181 return ret;
182 }
183
184 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185 {
186 BT_DBG("%s %ld", hdev->name, opt);
187
188 /* Reset device */
189 set_bit(HCI_RESET, &hdev->flags);
190 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
191 }
192
193 static void bredr_init(struct hci_dev *hdev)
194 {
195 struct hci_cp_delete_stored_link_key cp;
196 __le16 param;
197 __u8 flt_type;
198
199 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
200
201 /* Mandatory initialization */
202
203 /* Reset */
204 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
205 set_bit(HCI_RESET, &hdev->flags);
206 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
207 }
208
209 /* Read Local Supported Features */
210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
211
212 /* Read Local Version */
213 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
214
215 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
216 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
217
218 /* Read BD Address */
219 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
220
221 /* Read Class of Device */
222 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
223
224 /* Read Local Name */
225 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
226
227 /* Read Voice Setting */
228 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
229
230 /* Optional initialization */
231
232 /* Clear Event Filters */
233 flt_type = HCI_FLT_CLEAR_ALL;
234 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
235
236 /* Connection accept timeout ~20 secs */
237 param = cpu_to_le16(0x7d00);
238 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
239
240 bacpy(&cp.bdaddr, BDADDR_ANY);
241 cp.delete_all = 1;
242 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
243 }
244
245 static void amp_init(struct hci_dev *hdev)
246 {
247 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
248
249 /* Reset */
250 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
251
252 /* Read Local Version */
253 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
254 }
255
256 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
257 {
258 struct sk_buff *skb;
259
260 BT_DBG("%s %ld", hdev->name, opt);
261
262 /* Driver initialization */
263
264 /* Special commands */
265 while ((skb = skb_dequeue(&hdev->driver_init))) {
266 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
267 skb->dev = (void *) hdev;
268
269 skb_queue_tail(&hdev->cmd_q, skb);
270 queue_work(hdev->workqueue, &hdev->cmd_work);
271 }
272 skb_queue_purge(&hdev->driver_init);
273
274 switch (hdev->dev_type) {
275 case HCI_BREDR:
276 bredr_init(hdev);
277 break;
278
279 case HCI_AMP:
280 amp_init(hdev);
281 break;
282
283 default:
284 BT_ERR("Unknown device type %d", hdev->dev_type);
285 break;
286 }
287
288 }
289
290 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
291 {
292 BT_DBG("%s", hdev->name);
293
294 /* Read LE buffer size */
295 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
296 }
297
298 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
299 {
300 __u8 scan = opt;
301
302 BT_DBG("%s %x", hdev->name, scan);
303
304 /* Inquiry and Page scans */
305 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
306 }
307
308 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
309 {
310 __u8 auth = opt;
311
312 BT_DBG("%s %x", hdev->name, auth);
313
314 /* Authentication */
315 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
316 }
317
318 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
319 {
320 __u8 encrypt = opt;
321
322 BT_DBG("%s %x", hdev->name, encrypt);
323
324 /* Encryption */
325 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
326 }
327
328 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
329 {
330 __le16 policy = cpu_to_le16(opt);
331
332 BT_DBG("%s %x", hdev->name, policy);
333
334 /* Default link policy */
335 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
336 }
337
338 /* Get HCI device by index.
339 * Device is held on return. */
340 struct hci_dev *hci_dev_get(int index)
341 {
342 struct hci_dev *hdev = NULL, *d;
343
344 BT_DBG("%d", index);
345
346 if (index < 0)
347 return NULL;
348
349 read_lock(&hci_dev_list_lock);
350 list_for_each_entry(d, &hci_dev_list, list) {
351 if (d->id == index) {
352 hdev = hci_dev_hold(d);
353 break;
354 }
355 }
356 read_unlock(&hci_dev_list_lock);
357 return hdev;
358 }
359
360 /* ---- Inquiry support ---- */
361
362 bool hci_discovery_active(struct hci_dev *hdev)
363 {
364 struct discovery_state *discov = &hdev->discovery;
365
366 switch (discov->state) {
367 case DISCOVERY_FINDING:
368 case DISCOVERY_RESOLVING:
369 return true;
370
371 default:
372 return false;
373 }
374 }
375
376 void hci_discovery_set_state(struct hci_dev *hdev, int state)
377 {
378 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
379
380 if (hdev->discovery.state == state)
381 return;
382
383 switch (state) {
384 case DISCOVERY_STOPPED:
385 if (hdev->discovery.state != DISCOVERY_STARTING)
386 mgmt_discovering(hdev, 0);
387 hdev->discovery.type = 0;
388 break;
389 case DISCOVERY_STARTING:
390 break;
391 case DISCOVERY_FINDING:
392 mgmt_discovering(hdev, 1);
393 break;
394 case DISCOVERY_RESOLVING:
395 break;
396 case DISCOVERY_STOPPING:
397 break;
398 }
399
400 hdev->discovery.state = state;
401 }
402
403 static void inquiry_cache_flush(struct hci_dev *hdev)
404 {
405 struct discovery_state *cache = &hdev->discovery;
406 struct inquiry_entry *p, *n;
407
408 list_for_each_entry_safe(p, n, &cache->all, all) {
409 list_del(&p->all);
410 kfree(p);
411 }
412
413 INIT_LIST_HEAD(&cache->unknown);
414 INIT_LIST_HEAD(&cache->resolve);
415 }
416
417 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
418 {
419 struct discovery_state *cache = &hdev->discovery;
420 struct inquiry_entry *e;
421
422 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
423
424 list_for_each_entry(e, &cache->all, all) {
425 if (!bacmp(&e->data.bdaddr, bdaddr))
426 return e;
427 }
428
429 return NULL;
430 }
431
432 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
433 bdaddr_t *bdaddr)
434 {
435 struct discovery_state *cache = &hdev->discovery;
436 struct inquiry_entry *e;
437
438 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
439
440 list_for_each_entry(e, &cache->unknown, list) {
441 if (!bacmp(&e->data.bdaddr, bdaddr))
442 return e;
443 }
444
445 return NULL;
446 }
447
448 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
449 bdaddr_t *bdaddr,
450 int state)
451 {
452 struct discovery_state *cache = &hdev->discovery;
453 struct inquiry_entry *e;
454
455 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
456
457 list_for_each_entry(e, &cache->resolve, list) {
458 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
459 return e;
460 if (!bacmp(&e->data.bdaddr, bdaddr))
461 return e;
462 }
463
464 return NULL;
465 }
466
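/* Re-insert the entry into the resolve list so that it stays ordered by
 * signal strength (smallest |RSSI| first); entries whose name resolution
 * is already pending are skipped over rather than displaced.
 */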
467 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
468 struct inquiry_entry *ie)
469 {
470 struct discovery_state *cache = &hdev->discovery;
471 struct list_head *pos = &cache->resolve;
472 struct inquiry_entry *p;
473
474 list_del(&ie->list);
475
476 list_for_each_entry(p, &cache->resolve, list) {
477 if (p->name_state != NAME_PENDING &&
478 abs(p->data.rssi) >= abs(ie->data.rssi))
479 break;
480 pos = &p->list;
481 }
482
483 list_add(&ie->list, pos);
484 }
485
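/* Add a new inquiry cache entry or refresh an existing one. Returns true
 * if the remote name is already known (no name resolution is needed),
 * false if the name still has to be resolved or the entry could not be
 * allocated.
 */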
486 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
487 bool name_known, bool *ssp)
488 {
489 struct discovery_state *cache = &hdev->discovery;
490 struct inquiry_entry *ie;
491
492 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
493
494 if (ssp)
495 *ssp = data->ssp_mode;
496
497 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
498 if (ie) {
499 if (ie->data.ssp_mode && ssp)
500 *ssp = true;
501
502 if (ie->name_state == NAME_NEEDED &&
503 data->rssi != ie->data.rssi) {
504 ie->data.rssi = data->rssi;
505 hci_inquiry_cache_update_resolve(hdev, ie);
506 }
507
508 goto update;
509 }
510
511 /* Entry not in the cache. Add new one. */
512 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
513 if (!ie)
514 return false;
515
516 list_add(&ie->all, &cache->all);
517
518 if (name_known) {
519 ie->name_state = NAME_KNOWN;
520 } else {
521 ie->name_state = NAME_NOT_KNOWN;
522 list_add(&ie->list, &cache->unknown);
523 }
524
525 update:
526 if (name_known && ie->name_state != NAME_KNOWN &&
527 ie->name_state != NAME_PENDING) {
528 ie->name_state = NAME_KNOWN;
529 list_del(&ie->list);
530 }
531
532 memcpy(&ie->data, data, sizeof(*data));
533 ie->timestamp = jiffies;
534 cache->timestamp = jiffies;
535
536 if (ie->name_state == NAME_NOT_KNOWN)
537 return false;
538
539 return true;
540 }
541
542 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
543 {
544 struct discovery_state *cache = &hdev->discovery;
545 struct inquiry_info *info = (struct inquiry_info *) buf;
546 struct inquiry_entry *e;
547 int copied = 0;
548
549 list_for_each_entry(e, &cache->all, all) {
550 struct inquiry_data *data = &e->data;
551
552 if (copied >= num)
553 break;
554
555 bacpy(&info->bdaddr, &data->bdaddr);
556 info->pscan_rep_mode = data->pscan_rep_mode;
557 info->pscan_period_mode = data->pscan_period_mode;
558 info->pscan_mode = data->pscan_mode;
559 memcpy(info->dev_class, data->dev_class, 3);
560 info->clock_offset = data->clock_offset;
561
562 info++;
563 copied++;
564 }
565
566 BT_DBG("cache %p, copied %d", cache, copied);
567 return copied;
568 }
569
570 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
571 {
572 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
573 struct hci_cp_inquiry cp;
574
575 BT_DBG("%s", hdev->name);
576
577 if (test_bit(HCI_INQUIRY, &hdev->flags))
578 return;
579
580 /* Start Inquiry */
581 memcpy(&cp.lap, &ir->lap, 3);
582 cp.length = ir->length;
583 cp.num_rsp = ir->num_rsp;
584 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
585 }
586
587 int hci_inquiry(void __user *arg)
588 {
589 __u8 __user *ptr = arg;
590 struct hci_inquiry_req ir;
591 struct hci_dev *hdev;
592 int err = 0, do_inquiry = 0, max_rsp;
593 long timeo;
594 __u8 *buf;
595
596 if (copy_from_user(&ir, ptr, sizeof(ir)))
597 return -EFAULT;
598
599 hdev = hci_dev_get(ir.dev_id);
600 if (!hdev)
601 return -ENODEV;
602
603 hci_dev_lock(hdev);
604 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
605 inquiry_cache_empty(hdev) ||
606 ir.flags & IREQ_CACHE_FLUSH) {
607 inquiry_cache_flush(hdev);
608 do_inquiry = 1;
609 }
610 hci_dev_unlock(hdev);
611
612 timeo = ir.length * msecs_to_jiffies(2000);
613
614 if (do_inquiry) {
615 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
616 if (err < 0)
617 goto done;
618 }
619
620 /* for an unlimited number of responses we use a buffer with 255 entries */
621 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
622
623 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
624 * copy it to the user space.
625 */
626 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
627 if (!buf) {
628 err = -ENOMEM;
629 goto done;
630 }
631
632 hci_dev_lock(hdev);
633 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
634 hci_dev_unlock(hdev);
635
636 BT_DBG("num_rsp %d", ir.num_rsp);
637
638 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
639 ptr += sizeof(ir);
640 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
641 ir.num_rsp))
642 err = -EFAULT;
643 } else
644 err = -EFAULT;
645
646 kfree(buf);
647
648 done:
649 hci_dev_put(hdev);
650 return err;
651 }
652
653 /* ---- HCI ioctl helpers ---- */
654
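/* Bring the device up: call the driver's open() callback, run the HCI
 * init sequence (unless the controller is treated as HCI_RAW) and signal
 * HCI_DEV_UP. If initialization fails the device is torn down again
 * before returning the error.
 */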
655 int hci_dev_open(__u16 dev)
656 {
657 struct hci_dev *hdev;
658 int ret = 0;
659
660 hdev = hci_dev_get(dev);
661 if (!hdev)
662 return -ENODEV;
663
664 BT_DBG("%s %p", hdev->name, hdev);
665
666 hci_req_lock(hdev);
667
668 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
669 ret = -ENODEV;
670 goto done;
671 }
672
673 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
674 ret = -ERFKILL;
675 goto done;
676 }
677
678 if (test_bit(HCI_UP, &hdev->flags)) {
679 ret = -EALREADY;
680 goto done;
681 }
682
683 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
684 set_bit(HCI_RAW, &hdev->flags);
685
686 /* Treat all non BR/EDR controllers as raw devices if
687 enable_hs is not set */
688 if (hdev->dev_type != HCI_BREDR && !enable_hs)
689 set_bit(HCI_RAW, &hdev->flags);
690
691 if (hdev->open(hdev)) {
692 ret = -EIO;
693 goto done;
694 }
695
696 if (!test_bit(HCI_RAW, &hdev->flags)) {
697 atomic_set(&hdev->cmd_cnt, 1);
698 set_bit(HCI_INIT, &hdev->flags);
699 hdev->init_last_cmd = 0;
700
701 ret = __hci_request(hdev, hci_init_req, 0,
702 msecs_to_jiffies(HCI_INIT_TIMEOUT));
703
704 if (lmp_host_le_capable(hdev))
705 ret = __hci_request(hdev, hci_le_init_req, 0,
706 msecs_to_jiffies(HCI_INIT_TIMEOUT));
707
708 clear_bit(HCI_INIT, &hdev->flags);
709 }
710
711 if (!ret) {
712 hci_dev_hold(hdev);
713 set_bit(HCI_UP, &hdev->flags);
714 hci_notify(hdev, HCI_DEV_UP);
715 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
716 hci_dev_lock(hdev);
717 mgmt_powered(hdev, 1);
718 hci_dev_unlock(hdev);
719 }
720 } else {
721 /* Init failed, cleanup */
722 flush_work(&hdev->tx_work);
723 flush_work(&hdev->cmd_work);
724 flush_work(&hdev->rx_work);
725
726 skb_queue_purge(&hdev->cmd_q);
727 skb_queue_purge(&hdev->rx_q);
728
729 if (hdev->flush)
730 hdev->flush(hdev);
731
732 if (hdev->sent_cmd) {
733 kfree_skb(hdev->sent_cmd);
734 hdev->sent_cmd = NULL;
735 }
736
737 hdev->close(hdev);
738 hdev->flags = 0;
739 }
740
741 done:
742 hci_req_unlock(hdev);
743 hci_dev_put(hdev);
744 return ret;
745 }
746
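/* Common power-down path shared by hci_dev_close(), rfkill blocking and
 * the power_off work: cancel pending work, flush the queues, optionally
 * send a final HCI_Reset and call the driver's close() callback.
 */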
747 static int hci_dev_do_close(struct hci_dev *hdev)
748 {
749 BT_DBG("%s %p", hdev->name, hdev);
750
751 cancel_work_sync(&hdev->le_scan);
752
753 cancel_delayed_work(&hdev->power_off);
754
755 hci_req_cancel(hdev, ENODEV);
756 hci_req_lock(hdev);
757
758 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
759 del_timer_sync(&hdev->cmd_timer);
760 hci_req_unlock(hdev);
761 return 0;
762 }
763
764 /* Flush RX and TX works */
765 flush_work(&hdev->tx_work);
766 flush_work(&hdev->rx_work);
767
768 if (hdev->discov_timeout > 0) {
769 cancel_delayed_work(&hdev->discov_off);
770 hdev->discov_timeout = 0;
771 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
772 }
773
774 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
775 cancel_delayed_work(&hdev->service_cache);
776
777 cancel_delayed_work_sync(&hdev->le_scan_disable);
778
779 hci_dev_lock(hdev);
780 inquiry_cache_flush(hdev);
781 hci_conn_hash_flush(hdev);
782 hci_dev_unlock(hdev);
783
784 hci_notify(hdev, HCI_DEV_DOWN);
785
786 if (hdev->flush)
787 hdev->flush(hdev);
788
789 /* Reset device */
790 skb_queue_purge(&hdev->cmd_q);
791 atomic_set(&hdev->cmd_cnt, 1);
792 if (!test_bit(HCI_RAW, &hdev->flags) &&
793 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
794 set_bit(HCI_INIT, &hdev->flags);
795 __hci_request(hdev, hci_reset_req, 0,
796 msecs_to_jiffies(250));
797 clear_bit(HCI_INIT, &hdev->flags);
798 }
799
800 /* flush cmd work */
801 flush_work(&hdev->cmd_work);
802
803 /* Drop queues */
804 skb_queue_purge(&hdev->rx_q);
805 skb_queue_purge(&hdev->cmd_q);
806 skb_queue_purge(&hdev->raw_q);
807
808 /* Drop last sent command */
809 if (hdev->sent_cmd) {
810 del_timer_sync(&hdev->cmd_timer);
811 kfree_skb(hdev->sent_cmd);
812 hdev->sent_cmd = NULL;
813 }
814
815 /* After this point our queues are empty
816 * and no tasks are scheduled. */
817 hdev->close(hdev);
818
819 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
820 hci_dev_lock(hdev);
821 mgmt_powered(hdev, 0);
822 hci_dev_unlock(hdev);
823 }
824
825 /* Clear flags */
826 hdev->flags = 0;
827
828 memset(hdev->eir, 0, sizeof(hdev->eir));
829 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
830
831 hci_req_unlock(hdev);
832
833 hci_dev_put(hdev);
834 return 0;
835 }
836
837 int hci_dev_close(__u16 dev)
838 {
839 struct hci_dev *hdev;
840 int err;
841
842 hdev = hci_dev_get(dev);
843 if (!hdev)
844 return -ENODEV;
845
846 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
847 cancel_delayed_work(&hdev->power_off);
848
849 err = hci_dev_do_close(hdev);
850
851 hci_dev_put(hdev);
852 return err;
853 }
854
855 int hci_dev_reset(__u16 dev)
856 {
857 struct hci_dev *hdev;
858 int ret = 0;
859
860 hdev = hci_dev_get(dev);
861 if (!hdev)
862 return -ENODEV;
863
864 hci_req_lock(hdev);
865
866 if (!test_bit(HCI_UP, &hdev->flags))
867 goto done;
868
869 /* Drop queues */
870 skb_queue_purge(&hdev->rx_q);
871 skb_queue_purge(&hdev->cmd_q);
872
873 hci_dev_lock(hdev);
874 inquiry_cache_flush(hdev);
875 hci_conn_hash_flush(hdev);
876 hci_dev_unlock(hdev);
877
878 if (hdev->flush)
879 hdev->flush(hdev);
880
881 atomic_set(&hdev->cmd_cnt, 1);
882 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
883
884 if (!test_bit(HCI_RAW, &hdev->flags))
885 ret = __hci_request(hdev, hci_reset_req, 0,
886 msecs_to_jiffies(HCI_INIT_TIMEOUT));
887
888 done:
889 hci_req_unlock(hdev);
890 hci_dev_put(hdev);
891 return ret;
892 }
893
894 int hci_dev_reset_stat(__u16 dev)
895 {
896 struct hci_dev *hdev;
897 int ret = 0;
898
899 hdev = hci_dev_get(dev);
900 if (!hdev)
901 return -ENODEV;
902
903 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
904
905 hci_dev_put(hdev);
906
907 return ret;
908 }
909
910 int hci_dev_cmd(unsigned int cmd, void __user *arg)
911 {
912 struct hci_dev *hdev;
913 struct hci_dev_req dr;
914 int err = 0;
915
916 if (copy_from_user(&dr, arg, sizeof(dr)))
917 return -EFAULT;
918
919 hdev = hci_dev_get(dr.dev_id);
920 if (!hdev)
921 return -ENODEV;
922
923 switch (cmd) {
924 case HCISETAUTH:
925 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
926 msecs_to_jiffies(HCI_INIT_TIMEOUT));
927 break;
928
929 case HCISETENCRYPT:
930 if (!lmp_encrypt_capable(hdev)) {
931 err = -EOPNOTSUPP;
932 break;
933 }
934
935 if (!test_bit(HCI_AUTH, &hdev->flags)) {
936 /* Auth must be enabled first */
937 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
938 msecs_to_jiffies(HCI_INIT_TIMEOUT));
939 if (err)
940 break;
941 }
942
943 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
944 msecs_to_jiffies(HCI_INIT_TIMEOUT));
945 break;
946
947 case HCISETSCAN:
948 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
949 msecs_to_jiffies(HCI_INIT_TIMEOUT));
950 break;
951
952 case HCISETLINKPOL:
953 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
954 msecs_to_jiffies(HCI_INIT_TIMEOUT));
955 break;
956
957 case HCISETLINKMODE:
958 hdev->link_mode = ((__u16) dr.dev_opt) &
959 (HCI_LM_MASTER | HCI_LM_ACCEPT);
960 break;
961
962 case HCISETPTYPE:
963 hdev->pkt_type = (__u16) dr.dev_opt;
964 break;
965
966 case HCISETACLMTU:
967 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
968 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
969 break;
970
971 case HCISETSCOMTU:
972 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
973 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
974 break;
975
976 default:
977 err = -EINVAL;
978 break;
979 }
980
981 hci_dev_put(hdev);
982 return err;
983 }
984
985 int hci_get_dev_list(void __user *arg)
986 {
987 struct hci_dev *hdev;
988 struct hci_dev_list_req *dl;
989 struct hci_dev_req *dr;
990 int n = 0, size, err;
991 __u16 dev_num;
992
993 if (get_user(dev_num, (__u16 __user *) arg))
994 return -EFAULT;
995
996 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
997 return -EINVAL;
998
999 size = sizeof(*dl) + dev_num * sizeof(*dr);
1000
1001 dl = kzalloc(size, GFP_KERNEL);
1002 if (!dl)
1003 return -ENOMEM;
1004
1005 dr = dl->dev_req;
1006
1007 read_lock(&hci_dev_list_lock);
1008 list_for_each_entry(hdev, &hci_dev_list, list) {
1009 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1010 cancel_delayed_work(&hdev->power_off);
1011
1012 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1013 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1014
1015 (dr + n)->dev_id = hdev->id;
1016 (dr + n)->dev_opt = hdev->flags;
1017
1018 if (++n >= dev_num)
1019 break;
1020 }
1021 read_unlock(&hci_dev_list_lock);
1022
1023 dl->dev_num = n;
1024 size = sizeof(*dl) + n * sizeof(*dr);
1025
1026 err = copy_to_user(arg, dl, size);
1027 kfree(dl);
1028
1029 return err ? -EFAULT : 0;
1030 }
1031
1032 int hci_get_dev_info(void __user *arg)
1033 {
1034 struct hci_dev *hdev;
1035 struct hci_dev_info di;
1036 int err = 0;
1037
1038 if (copy_from_user(&di, arg, sizeof(di)))
1039 return -EFAULT;
1040
1041 hdev = hci_dev_get(di.dev_id);
1042 if (!hdev)
1043 return -ENODEV;
1044
1045 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1046 cancel_delayed_work_sync(&hdev->power_off);
1047
1048 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1049 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1050
1051 strcpy(di.name, hdev->name);
1052 di.bdaddr = hdev->bdaddr;
1053 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1054 di.flags = hdev->flags;
1055 di.pkt_type = hdev->pkt_type;
1056 di.acl_mtu = hdev->acl_mtu;
1057 di.acl_pkts = hdev->acl_pkts;
1058 di.sco_mtu = hdev->sco_mtu;
1059 di.sco_pkts = hdev->sco_pkts;
1060 di.link_policy = hdev->link_policy;
1061 di.link_mode = hdev->link_mode;
1062
1063 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1064 memcpy(&di.features, &hdev->features, sizeof(di.features));
1065
1066 if (copy_to_user(arg, &di, sizeof(di)))
1067 err = -EFAULT;
1068
1069 hci_dev_put(hdev);
1070
1071 return err;
1072 }
1073
1074 /* ---- Interface to HCI drivers ---- */
1075
1076 static int hci_rfkill_set_block(void *data, bool blocked)
1077 {
1078 struct hci_dev *hdev = data;
1079
1080 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1081
1082 if (!blocked)
1083 return 0;
1084
1085 hci_dev_do_close(hdev);
1086
1087 return 0;
1088 }
1089
1090 static const struct rfkill_ops hci_rfkill_ops = {
1091 .set_block = hci_rfkill_set_block,
1092 };
1093
1094 /* Alloc HCI device */
1095 struct hci_dev *hci_alloc_dev(void)
1096 {
1097 struct hci_dev *hdev;
1098
1099 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1100 if (!hdev)
1101 return NULL;
1102
1103 hci_init_sysfs(hdev);
1104 skb_queue_head_init(&hdev->driver_init);
1105
1106 return hdev;
1107 }
1108 EXPORT_SYMBOL(hci_alloc_dev);
1109
1110 /* Free HCI device */
1111 void hci_free_dev(struct hci_dev *hdev)
1112 {
1113 skb_queue_purge(&hdev->driver_init);
1114
1115 /* will free via device release */
1116 put_device(&hdev->dev);
1117 }
1118 EXPORT_SYMBOL(hci_free_dev);
1119
1120 static void hci_power_on(struct work_struct *work)
1121 {
1122 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1123 int err;
1124
1125 BT_DBG("%s", hdev->name);
1126
1127 err = hci_dev_open(hdev->id);
1128 if (err < 0) {
1129 mgmt_set_powered_failed(hdev, err);
1130 return;
1131 }
1132
1133 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1134 schedule_delayed_work(&hdev->power_off,
1135 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1136
1137 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1138 mgmt_index_added(hdev);
1139 }
1140
1141 static void hci_power_off(struct work_struct *work)
1142 {
1143 struct hci_dev *hdev = container_of(work, struct hci_dev,
1144 power_off.work);
1145
1146 BT_DBG("%s", hdev->name);
1147
1148 hci_dev_do_close(hdev);
1149 }
1150
1151 static void hci_discov_off(struct work_struct *work)
1152 {
1153 struct hci_dev *hdev;
1154 u8 scan = SCAN_PAGE;
1155
1156 hdev = container_of(work, struct hci_dev, discov_off.work);
1157
1158 BT_DBG("%s", hdev->name);
1159
1160 hci_dev_lock(hdev);
1161
1162 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1163
1164 hdev->discov_timeout = 0;
1165
1166 hci_dev_unlock(hdev);
1167 }
1168
1169 int hci_uuids_clear(struct hci_dev *hdev)
1170 {
1171 struct list_head *p, *n;
1172
1173 list_for_each_safe(p, n, &hdev->uuids) {
1174 struct bt_uuid *uuid;
1175
1176 uuid = list_entry(p, struct bt_uuid, list);
1177
1178 list_del(p);
1179 kfree(uuid);
1180 }
1181
1182 return 0;
1183 }
1184
1185 int hci_link_keys_clear(struct hci_dev *hdev)
1186 {
1187 struct list_head *p, *n;
1188
1189 list_for_each_safe(p, n, &hdev->link_keys) {
1190 struct link_key *key;
1191
1192 key = list_entry(p, struct link_key, list);
1193
1194 list_del(p);
1195 kfree(key);
1196 }
1197
1198 return 0;
1199 }
1200
1201 int hci_smp_ltks_clear(struct hci_dev *hdev)
1202 {
1203 struct smp_ltk *k, *tmp;
1204
1205 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1206 list_del(&k->list);
1207 kfree(k);
1208 }
1209
1210 return 0;
1211 }
1212
1213 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1214 {
1215 struct link_key *k;
1216
1217 list_for_each_entry(k, &hdev->link_keys, list)
1218 if (bacmp(bdaddr, &k->bdaddr) == 0)
1219 return k;
1220
1221 return NULL;
1222 }
1223
1224 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1225 u8 key_type, u8 old_key_type)
1226 {
1227 /* Legacy key */
1228 if (key_type < 0x03)
1229 return true;
1230
1231 /* Debug keys are insecure so don't store them persistently */
1232 if (key_type == HCI_LK_DEBUG_COMBINATION)
1233 return false;
1234
1235 /* Changed combination key and there's no previous one */
1236 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1237 return false;
1238
1239 /* Security mode 3 case */
1240 if (!conn)
1241 return true;
1242
1243 /* Neither local nor remote side had no-bonding as requirement */
1244 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1245 return true;
1246
1247 /* Local side had dedicated bonding as requirement */
1248 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1249 return true;
1250
1251 /* Remote side had dedicated bonding as requirement */
1252 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1253 return true;
1254
1255 /* If none of the above criteria match, then don't store the key
1256 * persistently */
1257 return false;
1258 }
1259
1260 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1261 {
1262 struct smp_ltk *k;
1263
1264 list_for_each_entry(k, &hdev->long_term_keys, list) {
1265 if (k->ediv != ediv ||
1266 memcmp(rand, k->rand, sizeof(k->rand)))
1267 continue;
1268
1269 return k;
1270 }
1271
1272 return NULL;
1273 }
1274 EXPORT_SYMBOL(hci_find_ltk);
1275
1276 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1277 u8 addr_type)
1278 {
1279 struct smp_ltk *k;
1280
1281 list_for_each_entry(k, &hdev->long_term_keys, list)
1282 if (addr_type == k->bdaddr_type &&
1283 bacmp(bdaddr, &k->bdaddr) == 0)
1284 return k;
1285
1286 return NULL;
1287 }
1288 EXPORT_SYMBOL(hci_find_ltk_by_addr);
1289
1290 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1291 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1292 {
1293 struct link_key *key, *old_key;
1294 u8 old_key_type;
1295 bool persistent;
1296
1297 old_key = hci_find_link_key(hdev, bdaddr);
1298 if (old_key) {
1299 old_key_type = old_key->type;
1300 key = old_key;
1301 } else {
1302 old_key_type = conn ? conn->key_type : 0xff;
1303 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1304 if (!key)
1305 return -ENOMEM;
1306 list_add(&key->list, &hdev->link_keys);
1307 }
1308
1309 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1310
1311 /* Some buggy controller combinations generate a changed
1312 * combination key for legacy pairing even when there's no
1313 * previous key */
1314 if (type == HCI_LK_CHANGED_COMBINATION &&
1315 (!conn || conn->remote_auth == 0xff) &&
1316 old_key_type == 0xff) {
1317 type = HCI_LK_COMBINATION;
1318 if (conn)
1319 conn->key_type = type;
1320 }
1321
1322 bacpy(&key->bdaddr, bdaddr);
1323 memcpy(key->val, val, 16);
1324 key->pin_len = pin_len;
1325
1326 if (type == HCI_LK_CHANGED_COMBINATION)
1327 key->type = old_key_type;
1328 else
1329 key->type = type;
1330
1331 if (!new_key)
1332 return 0;
1333
1334 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1335
1336 mgmt_new_link_key(hdev, key, persistent);
1337
1338 if (conn)
1339 conn->flush_key = !persistent;
1340
1341 return 0;
1342 }
1343
1344 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1345 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
1346 ediv, u8 rand[8])
1347 {
1348 struct smp_ltk *key, *old_key;
1349
1350 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1351 return 0;
1352
1353 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1354 if (old_key)
1355 key = old_key;
1356 else {
1357 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1358 if (!key)
1359 return -ENOMEM;
1360 list_add(&key->list, &hdev->long_term_keys);
1361 }
1362
1363 bacpy(&key->bdaddr, bdaddr);
1364 key->bdaddr_type = addr_type;
1365 memcpy(key->val, tk, sizeof(key->val));
1366 key->authenticated = authenticated;
1367 key->ediv = ediv;
1368 key->enc_size = enc_size;
1369 key->type = type;
1370 memcpy(key->rand, rand, sizeof(key->rand));
1371
1372 if (!new_key)
1373 return 0;
1374
1375 if (type & HCI_SMP_LTK)
1376 mgmt_new_ltk(hdev, key, 1);
1377
1378 return 0;
1379 }
1380
1381 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1382 {
1383 struct link_key *key;
1384
1385 key = hci_find_link_key(hdev, bdaddr);
1386 if (!key)
1387 return -ENOENT;
1388
1389 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1390
1391 list_del(&key->list);
1392 kfree(key);
1393
1394 return 0;
1395 }
1396
1397 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1398 {
1399 struct smp_ltk *k, *tmp;
1400
1401 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1402 if (bacmp(bdaddr, &k->bdaddr))
1403 continue;
1404
1405 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1406
1407 list_del(&k->list);
1408 kfree(k);
1409 }
1410
1411 return 0;
1412 }
1413
1414 /* HCI command timer function */
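/* Called when the controller does not answer a command in time; the
 * command credit count is forced back to 1 so cmd_work can still send
 * the next queued command.
 */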
1415 static void hci_cmd_timer(unsigned long arg)
1416 {
1417 struct hci_dev *hdev = (void *) arg;
1418
1419 BT_ERR("%s command tx timeout", hdev->name);
1420 atomic_set(&hdev->cmd_cnt, 1);
1421 queue_work(hdev->workqueue, &hdev->cmd_work);
1422 }
1423
1424 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1425 bdaddr_t *bdaddr)
1426 {
1427 struct oob_data *data;
1428
1429 list_for_each_entry(data, &hdev->remote_oob_data, list)
1430 if (bacmp(bdaddr, &data->bdaddr) == 0)
1431 return data;
1432
1433 return NULL;
1434 }
1435
1436 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1437 {
1438 struct oob_data *data;
1439
1440 data = hci_find_remote_oob_data(hdev, bdaddr);
1441 if (!data)
1442 return -ENOENT;
1443
1444 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1445
1446 list_del(&data->list);
1447 kfree(data);
1448
1449 return 0;
1450 }
1451
1452 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1453 {
1454 struct oob_data *data, *n;
1455
1456 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1457 list_del(&data->list);
1458 kfree(data);
1459 }
1460
1461 return 0;
1462 }
1463
1464 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1465 u8 *randomizer)
1466 {
1467 struct oob_data *data;
1468
1469 data = hci_find_remote_oob_data(hdev, bdaddr);
1470
1471 if (!data) {
1472 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1473 if (!data)
1474 return -ENOMEM;
1475
1476 bacpy(&data->bdaddr, bdaddr);
1477 list_add(&data->list, &hdev->remote_oob_data);
1478 }
1479
1480 memcpy(data->hash, hash, sizeof(data->hash));
1481 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1482
1483 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1484
1485 return 0;
1486 }
1487
1488 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1489 {
1490 struct bdaddr_list *b;
1491
1492 list_for_each_entry(b, &hdev->blacklist, list)
1493 if (bacmp(bdaddr, &b->bdaddr) == 0)
1494 return b;
1495
1496 return NULL;
1497 }
1498
1499 int hci_blacklist_clear(struct hci_dev *hdev)
1500 {
1501 struct list_head *p, *n;
1502
1503 list_for_each_safe(p, n, &hdev->blacklist) {
1504 struct bdaddr_list *b;
1505
1506 b = list_entry(p, struct bdaddr_list, list);
1507
1508 list_del(p);
1509 kfree(b);
1510 }
1511
1512 return 0;
1513 }
1514
1515 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1516 {
1517 struct bdaddr_list *entry;
1518
1519 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1520 return -EBADF;
1521
1522 if (hci_blacklist_lookup(hdev, bdaddr))
1523 return -EEXIST;
1524
1525 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1526 if (!entry)
1527 return -ENOMEM;
1528
1529 bacpy(&entry->bdaddr, bdaddr);
1530
1531 list_add(&entry->list, &hdev->blacklist);
1532
1533 return mgmt_device_blocked(hdev, bdaddr, type);
1534 }
1535
1536 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1537 {
1538 struct bdaddr_list *entry;
1539
1540 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1541 return hci_blacklist_clear(hdev);
1542
1543 entry = hci_blacklist_lookup(hdev, bdaddr);
1544 if (!entry)
1545 return -ENOENT;
1546
1547 list_del(&entry->list);
1548 kfree(entry);
1549
1550 return mgmt_device_unblocked(hdev, bdaddr, type);
1551 }
1552
1553 static void hci_clear_adv_cache(struct work_struct *work)
1554 {
1555 struct hci_dev *hdev = container_of(work, struct hci_dev,
1556 adv_work.work);
1557
1558 hci_dev_lock(hdev);
1559
1560 hci_adv_entries_clear(hdev);
1561
1562 hci_dev_unlock(hdev);
1563 }
1564
1565 int hci_adv_entries_clear(struct hci_dev *hdev)
1566 {
1567 struct adv_entry *entry, *tmp;
1568
1569 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1570 list_del(&entry->list);
1571 kfree(entry);
1572 }
1573
1574 BT_DBG("%s adv cache cleared", hdev->name);
1575
1576 return 0;
1577 }
1578
1579 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1580 {
1581 struct adv_entry *entry;
1582
1583 list_for_each_entry(entry, &hdev->adv_entries, list)
1584 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1585 return entry;
1586
1587 return NULL;
1588 }
1589
1590 static inline int is_connectable_adv(u8 evt_type)
1591 {
1592 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1593 return 1;
1594
1595 return 0;
1596 }
1597
1598 int hci_add_adv_entry(struct hci_dev *hdev,
1599 struct hci_ev_le_advertising_info *ev)
{
struct adv_entry *entry;

if (!is_connectable_adv(ev->evt_type))
1600 return -EINVAL;
1601
1602 /* Only new entries should be added to adv_entries. So, if
1603 * bdaddr was found, don't add it. */
1604 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1605 return 0;
1606
1607 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1608 if (!entry)
1609 return -ENOMEM;
1610
1611 bacpy(&entry->bdaddr, &ev->bdaddr);
1612 entry->bdaddr_type = ev->bdaddr_type;
1613
1614 list_add(&entry->list, &hdev->adv_entries);
1615
1616 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1617 batostr(&entry->bdaddr), entry->bdaddr_type);
1618
1619 return 0;
1620 }
1621
1622 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1623 {
1624 struct le_scan_params *param = (struct le_scan_params *) opt;
1625 struct hci_cp_le_set_scan_param cp;
1626
1627 memset(&cp, 0, sizeof(cp));
1628 cp.type = param->type;
1629 cp.interval = cpu_to_le16(param->interval);
1630 cp.window = cpu_to_le16(param->window);
1631
1632 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1633 }
1634
1635 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1636 {
1637 struct hci_cp_le_set_scan_enable cp;
1638
1639 memset(&cp, 0, sizeof(cp));
1640 cp.enable = 1;
1641
1642 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1643 }
1644
1645 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1646 u16 window, int timeout)
1647 {
1648 long timeo = msecs_to_jiffies(3000);
1649 struct le_scan_params param;
1650 int err;
1651
1652 BT_DBG("%s", hdev->name);
1653
1654 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1655 return -EINPROGRESS;
1656
1657 param.type = type;
1658 param.interval = interval;
1659 param.window = window;
1660
1661 hci_req_lock(hdev);
1662
1663 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1664 timeo);
1665 if (!err)
1666 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1667
1668 hci_req_unlock(hdev);
1669
1670 if (err < 0)
1671 return err;
1672
1673 schedule_delayed_work(&hdev->le_scan_disable,
1674 msecs_to_jiffies(timeout));
1675
1676 return 0;
1677 }
1678
1679 static void le_scan_disable_work(struct work_struct *work)
1680 {
1681 struct hci_dev *hdev = container_of(work, struct hci_dev,
1682 le_scan_disable.work);
1683 struct hci_cp_le_set_scan_enable cp;
1684
1685 BT_DBG("%s", hdev->name);
1686
1687 memset(&cp, 0, sizeof(cp));
1688
1689 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1690 }
1691
1692 static void le_scan_work(struct work_struct *work)
1693 {
1694 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1695 struct le_scan_params *param = &hdev->le_scan_params;
1696
1697 BT_DBG("%s", hdev->name);
1698
1699 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1700 param->timeout);
1701 }
1702
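/* Starting an LE scan has to sleep inside __hci_request(), so this only
 * records the scan parameters and defers the actual work to
 * le_scan_work() on system_long_wq.
 */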
1703 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1704 int timeout)
1705 {
1706 struct le_scan_params *param = &hdev->le_scan_params;
1707
1708 BT_DBG("%s", hdev->name);
1709
1710 if (work_busy(&hdev->le_scan))
1711 return -EINPROGRESS;
1712
1713 param->type = type;
1714 param->interval = interval;
1715 param->window = window;
1716 param->timeout = timeout;
1717
1718 queue_work(system_long_wq, &hdev->le_scan);
1719
1720 return 0;
1721 }
1722
1723 /* Register HCI device */
1724 int hci_register_dev(struct hci_dev *hdev)
1725 {
1726 struct list_head *head = &hci_dev_list, *p;
1727 int i, id, error;
1728
1729 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1730
1731 if (!hdev->open || !hdev->close)
1732 return -EINVAL;
1733
1734 /* Do not allow HCI_AMP devices to register at index 0,
1735 * so the index can be used as the AMP controller ID.
1736 */
1737 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1738
1739 write_lock(&hci_dev_list_lock);
1740
1741 /* Find first available device id */
1742 list_for_each(p, &hci_dev_list) {
1743 if (list_entry(p, struct hci_dev, list)->id != id)
1744 break;
1745 head = p; id++;
1746 }
1747
1748 sprintf(hdev->name, "hci%d", id);
1749 hdev->id = id;
1750 list_add_tail(&hdev->list, head);
1751
1752 mutex_init(&hdev->lock);
1753
1754 hdev->flags = 0;
1755 hdev->dev_flags = 0;
1756 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1757 hdev->esco_type = (ESCO_HV1);
1758 hdev->link_mode = (HCI_LM_ACCEPT);
1759 hdev->io_capability = 0x03; /* No Input No Output */
1760
1761 hdev->idle_timeout = 0;
1762 hdev->sniff_max_interval = 800;
1763 hdev->sniff_min_interval = 80;
1764
1765 INIT_WORK(&hdev->rx_work, hci_rx_work);
1766 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1767 INIT_WORK(&hdev->tx_work, hci_tx_work);
1768
1769
1770 skb_queue_head_init(&hdev->rx_q);
1771 skb_queue_head_init(&hdev->cmd_q);
1772 skb_queue_head_init(&hdev->raw_q);
1773
1774 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1775
1776 for (i = 0; i < NUM_REASSEMBLY; i++)
1777 hdev->reassembly[i] = NULL;
1778
1779 init_waitqueue_head(&hdev->req_wait_q);
1780 mutex_init(&hdev->req_lock);
1781
1782 discovery_init(hdev);
1783
1784 hci_conn_hash_init(hdev);
1785
1786 INIT_LIST_HEAD(&hdev->mgmt_pending);
1787
1788 INIT_LIST_HEAD(&hdev->blacklist);
1789
1790 INIT_LIST_HEAD(&hdev->uuids);
1791
1792 INIT_LIST_HEAD(&hdev->link_keys);
1793 INIT_LIST_HEAD(&hdev->long_term_keys);
1794
1795 INIT_LIST_HEAD(&hdev->remote_oob_data);
1796
1797 INIT_LIST_HEAD(&hdev->adv_entries);
1798
1799 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1800 INIT_WORK(&hdev->power_on, hci_power_on);
1801 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1802
1803 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1804
1805 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1806
1807 atomic_set(&hdev->promisc, 0);
1808
1809 INIT_WORK(&hdev->le_scan, le_scan_work);
1810
1811 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1812
1813 write_unlock(&hci_dev_list_lock);
1814
1815 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1816 WQ_MEM_RECLAIM, 1);
1817 if (!hdev->workqueue) {
1818 error = -ENOMEM;
1819 goto err;
1820 }
1821
1822 error = hci_add_sysfs(hdev);
1823 if (error < 0)
1824 goto err_wqueue;
1825
1826 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1827 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1828 if (hdev->rfkill) {
1829 if (rfkill_register(hdev->rfkill) < 0) {
1830 rfkill_destroy(hdev->rfkill);
1831 hdev->rfkill = NULL;
1832 }
1833 }
1834
1835 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1836 set_bit(HCI_SETUP, &hdev->dev_flags);
1837 schedule_work(&hdev->power_on);
1838
1839 hci_notify(hdev, HCI_DEV_REG);
1840 hci_dev_hold(hdev);
1841
1842 return id;
1843
1844 err_wqueue:
1845 destroy_workqueue(hdev->workqueue);
1846 err:
1847 write_lock(&hci_dev_list_lock);
1848 list_del(&hdev->list);
1849 write_unlock(&hci_dev_list_lock);
1850
1851 return error;
1852 }
1853 EXPORT_SYMBOL(hci_register_dev);
1854
1855 /* Unregister HCI device */
1856 void hci_unregister_dev(struct hci_dev *hdev)
1857 {
1858 int i;
1859
1860 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1861
1862 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1863
1864 write_lock(&hci_dev_list_lock);
1865 list_del(&hdev->list);
1866 write_unlock(&hci_dev_list_lock);
1867
1868 hci_dev_do_close(hdev);
1869
1870 for (i = 0; i < NUM_REASSEMBLY; i++)
1871 kfree_skb(hdev->reassembly[i]);
1872
1873 cancel_work_sync(&hdev->power_on);
1874
1875 if (!test_bit(HCI_INIT, &hdev->flags) &&
1876 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1877 hci_dev_lock(hdev);
1878 mgmt_index_removed(hdev);
1879 hci_dev_unlock(hdev);
1880 }
1881
1882 /* mgmt_index_removed should take care of emptying the
1883 * pending list */
1884 BUG_ON(!list_empty(&hdev->mgmt_pending));
1885
1886 hci_notify(hdev, HCI_DEV_UNREG);
1887
1888 if (hdev->rfkill) {
1889 rfkill_unregister(hdev->rfkill);
1890 rfkill_destroy(hdev->rfkill);
1891 }
1892
1893 hci_del_sysfs(hdev);
1894
1895 cancel_delayed_work_sync(&hdev->adv_work);
1896
1897 destroy_workqueue(hdev->workqueue);
1898
1899 hci_dev_lock(hdev);
1900 hci_blacklist_clear(hdev);
1901 hci_uuids_clear(hdev);
1902 hci_link_keys_clear(hdev);
1903 hci_smp_ltks_clear(hdev);
1904 hci_remote_oob_data_clear(hdev);
1905 hci_adv_entries_clear(hdev);
1906 hci_dev_unlock(hdev);
1907
1908 hci_dev_put(hdev);
1909 }
1910 EXPORT_SYMBOL(hci_unregister_dev);
1911
1912 /* Suspend HCI device */
1913 int hci_suspend_dev(struct hci_dev *hdev)
1914 {
1915 hci_notify(hdev, HCI_DEV_SUSPEND);
1916 return 0;
1917 }
1918 EXPORT_SYMBOL(hci_suspend_dev);
1919
1920 /* Resume HCI device */
1921 int hci_resume_dev(struct hci_dev *hdev)
1922 {
1923 hci_notify(hdev, HCI_DEV_RESUME);
1924 return 0;
1925 }
1926 EXPORT_SYMBOL(hci_resume_dev);
1927
1928 /* Receive frame from HCI drivers */
1929 int hci_recv_frame(struct sk_buff *skb)
1930 {
1931 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1932 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1933 && !test_bit(HCI_INIT, &hdev->flags))) {
1934 kfree_skb(skb);
1935 return -ENXIO;
1936 }
1937
1938 /* Incoming skb */
1939 bt_cb(skb)->incoming = 1;
1940
1941 /* Time stamp */
1942 __net_timestamp(skb);
1943
1944 skb_queue_tail(&hdev->rx_q, skb);
1945 queue_work(hdev->workqueue, &hdev->rx_work);
1946
1947 return 0;
1948 }
1949 EXPORT_SYMBOL(hci_recv_frame);
1950
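/* Reassemble a (possibly fragmented) HCI packet. Partial data is kept in
 * hdev->reassembly[index] and scb->expect tracks the bytes still missing;
 * once the packet header is complete the payload length is read from it,
 * and every fully reassembled packet is handed to hci_recv_frame().
 * Returns the number of unconsumed input bytes or a negative error.
 */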
1951 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1952 int count, __u8 index)
1953 {
1954 int len = 0;
1955 int hlen = 0;
1956 int remain = count;
1957 struct sk_buff *skb;
1958 struct bt_skb_cb *scb;
1959
1960 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1961 index >= NUM_REASSEMBLY)
1962 return -EILSEQ;
1963
1964 skb = hdev->reassembly[index];
1965
1966 if (!skb) {
1967 switch (type) {
1968 case HCI_ACLDATA_PKT:
1969 len = HCI_MAX_FRAME_SIZE;
1970 hlen = HCI_ACL_HDR_SIZE;
1971 break;
1972 case HCI_EVENT_PKT:
1973 len = HCI_MAX_EVENT_SIZE;
1974 hlen = HCI_EVENT_HDR_SIZE;
1975 break;
1976 case HCI_SCODATA_PKT:
1977 len = HCI_MAX_SCO_SIZE;
1978 hlen = HCI_SCO_HDR_SIZE;
1979 break;
1980 }
1981
1982 skb = bt_skb_alloc(len, GFP_ATOMIC);
1983 if (!skb)
1984 return -ENOMEM;
1985
1986 scb = (void *) skb->cb;
1987 scb->expect = hlen;
1988 scb->pkt_type = type;
1989
1990 skb->dev = (void *) hdev;
1991 hdev->reassembly[index] = skb;
1992 }
1993
1994 while (count) {
1995 scb = (void *) skb->cb;
1996 len = min_t(uint, scb->expect, count);
1997
1998 memcpy(skb_put(skb, len), data, len);
1999
2000 count -= len;
2001 data += len;
2002 scb->expect -= len;
2003 remain = count;
2004
2005 switch (type) {
2006 case HCI_EVENT_PKT:
2007 if (skb->len == HCI_EVENT_HDR_SIZE) {
2008 struct hci_event_hdr *h = hci_event_hdr(skb);
2009 scb->expect = h->plen;
2010
2011 if (skb_tailroom(skb) < scb->expect) {
2012 kfree_skb(skb);
2013 hdev->reassembly[index] = NULL;
2014 return -ENOMEM;
2015 }
2016 }
2017 break;
2018
2019 case HCI_ACLDATA_PKT:
2020 if (skb->len == HCI_ACL_HDR_SIZE) {
2021 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2022 scb->expect = __le16_to_cpu(h->dlen);
2023
2024 if (skb_tailroom(skb) < scb->expect) {
2025 kfree_skb(skb);
2026 hdev->reassembly[index] = NULL;
2027 return -ENOMEM;
2028 }
2029 }
2030 break;
2031
2032 case HCI_SCODATA_PKT:
2033 if (skb->len == HCI_SCO_HDR_SIZE) {
2034 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2035 scb->expect = h->dlen;
2036
2037 if (skb_tailroom(skb) < scb->expect) {
2038 kfree_skb(skb);
2039 hdev->reassembly[index] = NULL;
2040 return -ENOMEM;
2041 }
2042 }
2043 break;
2044 }
2045
2046 if (scb->expect == 0) {
2047 /* Complete frame */
2048
2049 bt_cb(skb)->pkt_type = type;
2050 hci_recv_frame(skb);
2051
2052 hdev->reassembly[index] = NULL;
2053 return remain;
2054 }
2055 }
2056
2057 return remain;
2058 }
2059
2060 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2061 {
2062 int rem = 0;
2063
2064 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2065 return -EILSEQ;
2066
2067 while (count) {
2068 rem = hci_reassembly(hdev, type, data, count, type - 1);
2069 if (rem < 0)
2070 return rem;
2071
2072 data += (count - rem);
2073 count = rem;
2074 }
2075
2076 return rem;
2077 }
2078 EXPORT_SYMBOL(hci_recv_fragment);
2079
2080 #define STREAM_REASSEMBLY 0
2081
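/* Reassembly helper for drivers that deliver a raw byte stream: the first
 * byte of each frame carries the packet type, the remainder is fed
 * through hci_reassembly() using the single STREAM_REASSEMBLY slot.
 */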
2082 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2083 {
2084 int type;
2085 int rem = 0;
2086
2087 while (count) {
2088 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2089
2090 if (!skb) {
2091 struct { char type; } *pkt;
2092
2093 /* Start of the frame */
2094 pkt = data;
2095 type = pkt->type;
2096
2097 data++;
2098 count--;
2099 } else
2100 type = bt_cb(skb)->pkt_type;
2101
2102 rem = hci_reassembly(hdev, type, data, count,
2103 STREAM_REASSEMBLY);
2104 if (rem < 0)
2105 return rem;
2106
2107 data += (count - rem);
2108 count = rem;
2109 }
2110
2111 return rem;
2112 }
2113 EXPORT_SYMBOL(hci_recv_stream_fragment);
2114
2115 /* ---- Interface to upper protocols ---- */
2116
2117 int hci_register_cb(struct hci_cb *cb)
2118 {
2119 BT_DBG("%p name %s", cb, cb->name);
2120
2121 write_lock(&hci_cb_list_lock);
2122 list_add(&cb->list, &hci_cb_list);
2123 write_unlock(&hci_cb_list_lock);
2124
2125 return 0;
2126 }
2127 EXPORT_SYMBOL(hci_register_cb);
2128
2129 int hci_unregister_cb(struct hci_cb *cb)
2130 {
2131 BT_DBG("%p name %s", cb, cb->name);
2132
2133 write_lock(&hci_cb_list_lock);
2134 list_del(&cb->list);
2135 write_unlock(&hci_cb_list_lock);
2136
2137 return 0;
2138 }
2139 EXPORT_SYMBOL(hci_unregister_cb);
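
/* Minimal sketch of the upper-protocol side, assuming only the cb->name
 * field referenced by the BT_DBG calls above; any other hci_cb members an
 * upper protocol would normally fill in are omitted. The protocol name and
 * init/exit functions are hypothetical. Kept under #if 0.
 */
#if 0
static struct hci_cb example_proto_cb = {
	.name = "example_proto",
};

static int __init example_proto_init(void)
{
	return hci_register_cb(&example_proto_cb);
}

static void __exit example_proto_exit(void)
{
	hci_unregister_cb(&example_proto_cb);
}
#endif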
2140
2141 static int hci_send_frame(struct sk_buff *skb)
2142 {
2143 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2144
2145 if (!hdev) {
2146 kfree_skb(skb);
2147 return -ENODEV;
2148 }
2149
2150 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2151
2152 /* Time stamp */
2153 __net_timestamp(skb);
2154
2155 /* Send copy to monitor */
2156 hci_send_to_monitor(hdev, skb);
2157
2158 if (atomic_read(&hdev->promisc)) {
2159 /* Send copy to the sockets */
2160 hci_send_to_sock(hdev, skb);
2161 }
2162
2163 /* Get rid of skb owner, prior to sending to the driver. */
2164 skb_orphan(skb);
2165
2166 return hdev->send(skb);
2167 }
2168
2169 /* Send HCI command */
2170 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2171 {
2172 int len = HCI_COMMAND_HDR_SIZE + plen;
2173 struct hci_command_hdr *hdr;
2174 struct sk_buff *skb;
2175
2176 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2177
2178 skb = bt_skb_alloc(len, GFP_ATOMIC);
2179 if (!skb) {
2180 BT_ERR("%s no memory for command", hdev->name);
2181 return -ENOMEM;
2182 }
2183
2184 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2185 hdr->opcode = cpu_to_le16(opcode);
2186 hdr->plen = plen;
2187
2188 if (plen)
2189 memcpy(skb_put(skb, plen), param, plen);
2190
2191 BT_DBG("skb len %d", skb->len);
2192
2193 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2194 skb->dev = (void *) hdev;
2195
2196 if (test_bit(HCI_INIT, &hdev->flags))
2197 hdev->init_last_cmd = opcode;
2198
2199 skb_queue_tail(&hdev->cmd_q, skb);
2200 queue_work(hdev->workqueue, &hdev->cmd_work);
2201
2202 return 0;
2203 }
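
/* Hedged usage sketch: queueing a command with no parameters. HCI_OP_RESET
 * is a standard opcode from hci.h; commands that take parameters pass a
 * local cp struct and its size instead of (0, NULL), as hci_do_inquiry()
 * below does for HCI_OP_INQUIRY. Kept under #if 0.
 */
#if 0
static int example_send_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
#endif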
2204
2205 /* Get data from the previously sent command */
2206 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2207 {
2208 struct hci_command_hdr *hdr;
2209
2210 if (!hdev->sent_cmd)
2211 return NULL;
2212
2213 hdr = (void *) hdev->sent_cmd->data;
2214
2215 if (hdr->opcode != cpu_to_le16(opcode))
2216 return NULL;
2217
2218 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2219
2220 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2221 }
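
/* Hedged sketch: a completion handler that needs the parameters of the
 * command it is completing can recover them from the saved sent_cmd skb.
 * The handler itself is hypothetical; struct hci_cp_inquiry and
 * HCI_OP_INQUIRY are the same definitions used by hci_do_inquiry() below.
 * Kept under #if 0.
 */
#if 0
static void example_inquiry_complete(struct hci_dev *hdev)
{
	struct hci_cp_inquiry *cp = hci_sent_cmd_data(hdev, HCI_OP_INQUIRY);

	if (!cp)
		return;	/* The pending command was not an Inquiry */

	BT_DBG("%s inquiry length %d", hdev->name, cp->length);
}
#endif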
2222
2223 /* Send ACL data */
2224 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2225 {
2226 struct hci_acl_hdr *hdr;
2227 int len = skb->len;
2228
2229 skb_push(skb, HCI_ACL_HDR_SIZE);
2230 skb_reset_transport_header(skb);
2231 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2232 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2233 hdr->dlen = cpu_to_le16(len);
2234 }
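
/* Worked example for the header built above (assuming the usual
 * hci_handle_pack() layout: 12-bit connection handle in bits 0-11, ACL
 * packet boundary/broadcast flags in bits 12-15): handle 0x002a sent with
 * ACL_START (0x02) is packed as 0x202a, stored little-endian, followed by
 * the little-endian 16-bit payload length.
 */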
2235
2236 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2237 struct sk_buff *skb, __u16 flags)
2238 {
2239 struct hci_dev *hdev = conn->hdev;
2240 struct sk_buff *list;
2241
2242 list = skb_shinfo(skb)->frag_list;
2243 if (!list) {
2244 /* Non fragmented */
2245 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2246
2247 skb_queue_tail(queue, skb);
2248 } else {
2249 /* Fragmented */
2250 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2251
2252 skb_shinfo(skb)->frag_list = NULL;
2253
2254 /* Queue all fragments atomically */
2255 spin_lock(&queue->lock);
2256
2257 __skb_queue_tail(queue, skb);
2258
2259 flags &= ~ACL_START;
2260 flags |= ACL_CONT;
2261 do {
2262 skb = list; list = list->next;
2263
2264 skb->dev = (void *) hdev;
2265 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2266 hci_add_acl_hdr(skb, conn->handle, flags);
2267
2268 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2269
2270 __skb_queue_tail(queue, skb);
2271 } while (list);
2272
2273 spin_unlock(&queue->lock);
2274 }
2275 }
2276
2277 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2278 {
2279 struct hci_conn *conn = chan->conn;
2280 struct hci_dev *hdev = conn->hdev;
2281
2282 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2283
2284 skb->dev = (void *) hdev;
2285 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2286 hci_add_acl_hdr(skb, conn->handle, flags);
2287
2288 hci_queue_acl(conn, &chan->data_q, skb, flags);
2289
2290 queue_work(hdev->workqueue, &hdev->tx_work);
2291 }
2292 EXPORT_SYMBOL(hci_send_acl);
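
/* Hedged sketch of the upper-protocol side: build an skb with headroom for
 * the 4-byte ACL header that hci_add_acl_hdr() pushes, then hand it to
 * hci_send_acl() on an existing channel. The payload handling and flag
 * choice are illustrative only. Kept under #if 0.
 */
#if 0
static int example_send_acl(struct hci_chan *chan, const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_ACL_HDR_SIZE + len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, HCI_ACL_HDR_SIZE);
	memcpy(skb_put(skb, len), buf, len);

	hci_send_acl(chan, skb, ACL_START);

	return 0;
}
#endif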
2293
2294 /* Send SCO data */
2295 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2296 {
2297 struct hci_dev *hdev = conn->hdev;
2298 struct hci_sco_hdr hdr;
2299
2300 BT_DBG("%s len %d", hdev->name, skb->len);
2301
2302 hdr.handle = cpu_to_le16(conn->handle);
2303 hdr.dlen = skb->len;
2304
2305 skb_push(skb, HCI_SCO_HDR_SIZE);
2306 skb_reset_transport_header(skb);
2307 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2308
2309 skb->dev = (void *) hdev;
2310 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2311
2312 skb_queue_tail(&conn->data_q, skb);
2313 queue_work(hdev->workqueue, &hdev->tx_work);
2314 }
2315 EXPORT_SYMBOL(hci_send_sco);
2316
2317 /* ---- HCI TX task (outgoing data) ---- */
2318
2319 /* HCI Connection scheduler */
2320 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2321 {
2322 struct hci_conn_hash *h = &hdev->conn_hash;
2323 struct hci_conn *conn = NULL, *c;
2324 int num = 0, min = ~0;
2325
2326 /* We don't have to lock device here. Connections are always
2327 * added and removed with TX task disabled. */
2328
2329 rcu_read_lock();
2330
2331 list_for_each_entry_rcu(c, &h->list, list) {
2332 if (c->type != type || skb_queue_empty(&c->data_q))
2333 continue;
2334
2335 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2336 continue;
2337
2338 num++;
2339
2340 if (c->sent < min) {
2341 min = c->sent;
2342 conn = c;
2343 }
2344
2345 if (hci_conn_num(hdev, type) == num)
2346 break;
2347 }
2348
2349 rcu_read_unlock();
2350
2351 if (conn) {
2352 int cnt, q;
2353
2354 switch (conn->type) {
2355 case ACL_LINK:
2356 cnt = hdev->acl_cnt;
2357 break;
2358 case SCO_LINK:
2359 case ESCO_LINK:
2360 cnt = hdev->sco_cnt;
2361 break;
2362 case LE_LINK:
2363 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2364 break;
2365 default:
2366 cnt = 0;
2367 BT_ERR("Unknown link type");
2368 }
2369
2370 q = cnt / num;
2371 *quote = q ? q : 1;
2372 } else
2373 *quote = 0;
2374
2375 BT_DBG("conn %p quote %d", conn, *quote);
2376 return conn;
2377 }
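
/* Worked example of the quota computed above (illustrative numbers): with
 * hdev->sco_cnt == 8 free controller buffers and num == 3 SCO connections
 * holding queued data, q = 8 / 3 = 2, so the least-recently-served
 * connection may send up to two frames in this scheduling pass. When
 * cnt / num rounds down to zero the quote is forced to 1, so a connection
 * is never starved while at least one buffer is free.
 */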
2378
2379 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2380 {
2381 struct hci_conn_hash *h = &hdev->conn_hash;
2382 struct hci_conn *c;
2383
2384 BT_ERR("%s link tx timeout", hdev->name);
2385
2386 rcu_read_lock();
2387
2388 /* Kill stalled connections */
2389 list_for_each_entry_rcu(c, &h->list, list) {
2390 if (c->type == type && c->sent) {
2391 BT_ERR("%s killing stalled connection %s",
2392 hdev->name, batostr(&c->dst));
2393 hci_acl_disconn(c, 0x13);
2394 }
2395 }
2396
2397 rcu_read_unlock();
2398 }
2399
2400 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2401 int *quote)
2402 {
2403 struct hci_conn_hash *h = &hdev->conn_hash;
2404 struct hci_chan *chan = NULL;
2405 int num = 0, min = ~0, cur_prio = 0;
2406 struct hci_conn *conn;
2407 int cnt, q, conn_num = 0;
2408
2409 BT_DBG("%s", hdev->name);
2410
2411 rcu_read_lock();
2412
2413 list_for_each_entry_rcu(conn, &h->list, list) {
2414 struct hci_chan *tmp;
2415
2416 if (conn->type != type)
2417 continue;
2418
2419 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2420 continue;
2421
2422 conn_num++;
2423
2424 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2425 struct sk_buff *skb;
2426
2427 if (skb_queue_empty(&tmp->data_q))
2428 continue;
2429
2430 skb = skb_peek(&tmp->data_q);
2431 if (skb->priority < cur_prio)
2432 continue;
2433
2434 if (skb->priority > cur_prio) {
2435 num = 0;
2436 min = ~0;
2437 cur_prio = skb->priority;
2438 }
2439
2440 num++;
2441
2442 if (conn->sent < min) {
2443 min = conn->sent;
2444 chan = tmp;
2445 }
2446 }
2447
2448 if (hci_conn_num(hdev, type) == conn_num)
2449 break;
2450 }
2451
2452 rcu_read_unlock();
2453
2454 if (!chan)
2455 return NULL;
2456
2457 switch (chan->conn->type) {
2458 case ACL_LINK:
2459 cnt = hdev->acl_cnt;
2460 break;
2461 case SCO_LINK:
2462 case ESCO_LINK:
2463 cnt = hdev->sco_cnt;
2464 break;
2465 case LE_LINK:
2466 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2467 break;
2468 default:
2469 cnt = 0;
2470 BT_ERR("Unknown link type");
2471 }
2472
2473 q = cnt / num;
2474 *quote = q ? q : 1;
2475 BT_DBG("chan %p quote %d", chan, *quote);
2476 return chan;
2477 }
2478
2479 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2480 {
2481 struct hci_conn_hash *h = &hdev->conn_hash;
2482 struct hci_conn *conn;
2483 int num = 0;
2484
2485 BT_DBG("%s", hdev->name);
2486
2487 rcu_read_lock();
2488
2489 list_for_each_entry_rcu(conn, &h->list, list) {
2490 struct hci_chan *chan;
2491
2492 if (conn->type != type)
2493 continue;
2494
2495 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2496 continue;
2497
2498 num++;
2499
2500 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2501 struct sk_buff *skb;
2502
2503 if (chan->sent) {
2504 chan->sent = 0;
2505 continue;
2506 }
2507
2508 if (skb_queue_empty(&chan->data_q))
2509 continue;
2510
2511 skb = skb_peek(&chan->data_q);
2512 if (skb->priority >= HCI_PRIO_MAX - 1)
2513 continue;
2514
2515 skb->priority = HCI_PRIO_MAX - 1;
2516
2517 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2518 skb->priority);
2519 }
2520
2521 if (hci_conn_num(hdev, type) == num)
2522 break;
2523 }
2524
2525 rcu_read_unlock();
2526
2527 }
2528
2529 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2530 {
2531 /* Calculate count of blocks used by this packet */
2532 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2533 }
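
/* Worked example for block-based flow control (illustrative numbers): with
 * hdev->block_len == 64 and an ACL frame carrying 300 payload bytes after
 * its 4-byte header, skb->len - HCI_ACL_HDR_SIZE == 300 and
 * DIV_ROUND_UP(300, 64) == 5, so sending the packet consumes five blocks
 * of the controller's block budget.
 */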
2534
2535 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2536 {
2537 if (!test_bit(HCI_RAW, &hdev->flags)) {
2538 /* ACL tx timeout must be longer than maximum
2539 * link supervision timeout (40.9 seconds) */
2540 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2541 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2542 hci_link_tx_to(hdev, ACL_LINK);
2543 }
2544 }
2545
2546 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2547 {
2548 unsigned int cnt = hdev->acl_cnt;
2549 struct hci_chan *chan;
2550 struct sk_buff *skb;
2551 int quote;
2552
2553 __check_timeout(hdev, cnt);
2554
2555 while (hdev->acl_cnt &&
2556 			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2557 u32 priority = (skb_peek(&chan->data_q))->priority;
2558 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2559 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2560 skb->len, skb->priority);
2561
2562 /* Stop if priority has changed */
2563 if (skb->priority < priority)
2564 break;
2565
2566 skb = skb_dequeue(&chan->data_q);
2567
2568 hci_conn_enter_active_mode(chan->conn,
2569 bt_cb(skb)->force_active);
2570
2571 hci_send_frame(skb);
2572 hdev->acl_last_tx = jiffies;
2573
2574 hdev->acl_cnt--;
2575 chan->sent++;
2576 chan->conn->sent++;
2577 }
2578 }
2579
2580 if (cnt != hdev->acl_cnt)
2581 hci_prio_recalculate(hdev, ACL_LINK);
2582 }
2583
2584 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2585 {
2586 unsigned int cnt = hdev->block_cnt;
2587 struct hci_chan *chan;
2588 struct sk_buff *skb;
2589 int quote;
2590
2591 __check_timeout(hdev, cnt);
2592
2593 while (hdev->block_cnt > 0 &&
2594 			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2595 u32 priority = (skb_peek(&chan->data_q))->priority;
2596 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2597 int blocks;
2598
2599 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2600 skb->len, skb->priority);
2601
2602 /* Stop if priority has changed */
2603 if (skb->priority < priority)
2604 break;
2605
2606 skb = skb_dequeue(&chan->data_q);
2607
2608 blocks = __get_blocks(hdev, skb);
2609 if (blocks > hdev->block_cnt)
2610 return;
2611
2612 hci_conn_enter_active_mode(chan->conn,
2613 bt_cb(skb)->force_active);
2614
2615 hci_send_frame(skb);
2616 hdev->acl_last_tx = jiffies;
2617
2618 hdev->block_cnt -= blocks;
2619 quote -= blocks;
2620
2621 chan->sent += blocks;
2622 chan->conn->sent += blocks;
2623 }
2624 }
2625
2626 if (cnt != hdev->block_cnt)
2627 hci_prio_recalculate(hdev, ACL_LINK);
2628 }
2629
2630 static inline void hci_sched_acl(struct hci_dev *hdev)
2631 {
2632 BT_DBG("%s", hdev->name);
2633
2634 if (!hci_conn_num(hdev, ACL_LINK))
2635 return;
2636
2637 switch (hdev->flow_ctl_mode) {
2638 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2639 hci_sched_acl_pkt(hdev);
2640 break;
2641
2642 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2643 hci_sched_acl_blk(hdev);
2644 break;
2645 }
2646 }
2647
2648 /* Schedule SCO */
2649 static inline void hci_sched_sco(struct hci_dev *hdev)
2650 {
2651 struct hci_conn *conn;
2652 struct sk_buff *skb;
2653 int quote;
2654
2655 BT_DBG("%s", hdev->name);
2656
2657 if (!hci_conn_num(hdev, SCO_LINK))
2658 return;
2659
2660 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2661 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2662 BT_DBG("skb %p len %d", skb, skb->len);
2663 hci_send_frame(skb);
2664
2665 conn->sent++;
2666 if (conn->sent == ~0)
2667 conn->sent = 0;
2668 }
2669 }
2670 }
2671
2672 static inline void hci_sched_esco(struct hci_dev *hdev)
2673 {
2674 struct hci_conn *conn;
2675 struct sk_buff *skb;
2676 int quote;
2677
2678 BT_DBG("%s", hdev->name);
2679
2680 if (!hci_conn_num(hdev, ESCO_LINK))
2681 return;
2682
2683 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2684 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2685 BT_DBG("skb %p len %d", skb, skb->len);
2686 hci_send_frame(skb);
2687
2688 conn->sent++;
2689 if (conn->sent == ~0)
2690 conn->sent = 0;
2691 }
2692 }
2693 }
2694
2695 static inline void hci_sched_le(struct hci_dev *hdev)
2696 {
2697 struct hci_chan *chan;
2698 struct sk_buff *skb;
2699 int quote, cnt, tmp;
2700
2701 BT_DBG("%s", hdev->name);
2702
2703 if (!hci_conn_num(hdev, LE_LINK))
2704 return;
2705
2706 if (!test_bit(HCI_RAW, &hdev->flags)) {
2707 /* LE tx timeout must be longer than maximum
2708 * link supervision timeout (40.9 seconds) */
2709 if (!hdev->le_cnt && hdev->le_pkts &&
2710 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2711 hci_link_tx_to(hdev, LE_LINK);
2712 }
2713
2714 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2715 tmp = cnt;
2716 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2717 u32 priority = (skb_peek(&chan->data_q))->priority;
2718 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2719 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2720 skb->len, skb->priority);
2721
2722 /* Stop if priority has changed */
2723 if (skb->priority < priority)
2724 break;
2725
2726 skb = skb_dequeue(&chan->data_q);
2727
2728 hci_send_frame(skb);
2729 hdev->le_last_tx = jiffies;
2730
2731 cnt--;
2732 chan->sent++;
2733 chan->conn->sent++;
2734 }
2735 }
2736
2737 if (hdev->le_pkts)
2738 hdev->le_cnt = cnt;
2739 else
2740 hdev->acl_cnt = cnt;
2741
2742 if (cnt != tmp)
2743 hci_prio_recalculate(hdev, LE_LINK);
2744 }
2745
2746 static void hci_tx_work(struct work_struct *work)
2747 {
2748 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2749 struct sk_buff *skb;
2750
2751 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2752 hdev->sco_cnt, hdev->le_cnt);
2753
2754 /* Schedule queues and send stuff to HCI driver */
2755
2756 hci_sched_acl(hdev);
2757
2758 hci_sched_sco(hdev);
2759
2760 hci_sched_esco(hdev);
2761
2762 hci_sched_le(hdev);
2763
2764 /* Send next queued raw (unknown type) packet */
2765 while ((skb = skb_dequeue(&hdev->raw_q)))
2766 hci_send_frame(skb);
2767 }
2768
2769 /* ----- HCI RX task (incoming data processing) ----- */
2770
2771 /* ACL data packet */
2772 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2773 {
2774 struct hci_acl_hdr *hdr = (void *) skb->data;
2775 struct hci_conn *conn;
2776 __u16 handle, flags;
2777
2778 skb_pull(skb, HCI_ACL_HDR_SIZE);
2779
2780 handle = __le16_to_cpu(hdr->handle);
2781 flags = hci_flags(handle);
2782 handle = hci_handle(handle);
2783
2784 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2785
2786 hdev->stat.acl_rx++;
2787
2788 hci_dev_lock(hdev);
2789 conn = hci_conn_hash_lookup_handle(hdev, handle);
2790 hci_dev_unlock(hdev);
2791
2792 if (conn) {
2793 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2794
2795 hci_dev_lock(hdev);
2796 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2797 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2798 mgmt_device_connected(hdev, &conn->dst, conn->type,
2799 conn->dst_type, 0, NULL, 0,
2800 conn->dev_class);
2801 hci_dev_unlock(hdev);
2802
2803 /* Send to upper protocol */
2804 l2cap_recv_acldata(conn, skb, flags);
2805 return;
2806 } else {
2807 BT_ERR("%s ACL packet for unknown connection handle %d",
2808 hdev->name, handle);
2809 }
2810
2811 kfree_skb(skb);
2812 }
2813
2814 /* SCO data packet */
2815 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2816 {
2817 struct hci_sco_hdr *hdr = (void *) skb->data;
2818 struct hci_conn *conn;
2819 __u16 handle;
2820
2821 skb_pull(skb, HCI_SCO_HDR_SIZE);
2822
2823 handle = __le16_to_cpu(hdr->handle);
2824
2825 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2826
2827 hdev->stat.sco_rx++;
2828
2829 hci_dev_lock(hdev);
2830 conn = hci_conn_hash_lookup_handle(hdev, handle);
2831 hci_dev_unlock(hdev);
2832
2833 if (conn) {
2834 /* Send to upper protocol */
2835 sco_recv_scodata(conn, skb);
2836 return;
2837 } else {
2838 BT_ERR("%s SCO packet for unknown connection handle %d",
2839 hdev->name, handle);
2840 }
2841
2842 kfree_skb(skb);
2843 }
2844
2845 static void hci_rx_work(struct work_struct *work)
2846 {
2847 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2848 struct sk_buff *skb;
2849
2850 BT_DBG("%s", hdev->name);
2851
2852 while ((skb = skb_dequeue(&hdev->rx_q))) {
2853 /* Send copy to monitor */
2854 hci_send_to_monitor(hdev, skb);
2855
2856 if (atomic_read(&hdev->promisc)) {
2857 /* Send copy to the sockets */
2858 hci_send_to_sock(hdev, skb);
2859 }
2860
2861 if (test_bit(HCI_RAW, &hdev->flags)) {
2862 kfree_skb(skb);
2863 continue;
2864 }
2865
2866 if (test_bit(HCI_INIT, &hdev->flags)) {
2867 			/* Don't process data packets in this state. */
2868 switch (bt_cb(skb)->pkt_type) {
2869 case HCI_ACLDATA_PKT:
2870 case HCI_SCODATA_PKT:
2871 kfree_skb(skb);
2872 continue;
2873 }
2874 }
2875
2876 /* Process frame */
2877 switch (bt_cb(skb)->pkt_type) {
2878 case HCI_EVENT_PKT:
2879 BT_DBG("%s Event packet", hdev->name);
2880 hci_event_packet(hdev, skb);
2881 break;
2882
2883 case HCI_ACLDATA_PKT:
2884 BT_DBG("%s ACL data packet", hdev->name);
2885 hci_acldata_packet(hdev, skb);
2886 break;
2887
2888 case HCI_SCODATA_PKT:
2889 BT_DBG("%s SCO data packet", hdev->name);
2890 hci_scodata_packet(hdev, skb);
2891 break;
2892
2893 default:
2894 kfree_skb(skb);
2895 break;
2896 }
2897 }
2898 }
2899
2900 static void hci_cmd_work(struct work_struct *work)
2901 {
2902 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2903 struct sk_buff *skb;
2904
2905 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2906
2907 /* Send queued commands */
2908 if (atomic_read(&hdev->cmd_cnt)) {
2909 skb = skb_dequeue(&hdev->cmd_q);
2910 if (!skb)
2911 return;
2912
2913 kfree_skb(hdev->sent_cmd);
2914
2915 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2916 if (hdev->sent_cmd) {
2917 atomic_dec(&hdev->cmd_cnt);
2918 hci_send_frame(skb);
2919 if (test_bit(HCI_RESET, &hdev->flags))
2920 del_timer(&hdev->cmd_timer);
2921 else
2922 mod_timer(&hdev->cmd_timer,
2923 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2924 } else {
2925 skb_queue_head(&hdev->cmd_q, skb);
2926 queue_work(hdev->workqueue, &hdev->cmd_work);
2927 }
2928 }
2929 }
2930
2931 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2932 {
2933 /* General inquiry access code (GIAC) */
2934 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2935 struct hci_cp_inquiry cp;
2936
2937 BT_DBG("%s", hdev->name);
2938
2939 if (test_bit(HCI_INQUIRY, &hdev->flags))
2940 return -EINPROGRESS;
2941
2942 inquiry_cache_flush(hdev);
2943
2944 memset(&cp, 0, sizeof(cp));
2945 memcpy(&cp.lap, lap, sizeof(cp.lap));
2946 cp.length = length;
2947
2948 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2949 }
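
/* Hedged usage sketch: the Inquiry_Length parameter is expressed in units
 * of 1.28 seconds in the HCI specification, so a hypothetical caller
 * wanting roughly ten seconds of discovery would pass 0x08
 * (8 * 1.28 s = 10.24 s). Kept under #if 0.
 */
#if 0
static int example_start_discovery(struct hci_dev *hdev)
{
	return hci_do_inquiry(hdev, 0x08);
}
#endif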
2950
2951 int hci_cancel_inquiry(struct hci_dev *hdev)
2952 {
2953 BT_DBG("%s", hdev->name);
2954
2955 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2956 return -EPERM;
2957
2958 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2959 }
2960