1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /*
26 * BlueZ HCI Core.
27 *
28 * $Id: hci_core.c,v 1.14 2002/08/26 16:57:57 maxk Exp $
29 */
30
31 #include <linux/config.h>
32 #include <linux/module.h>
33 #include <linux/kmod.h>
34
35 #include <linux/types.h>
36 #include <linux/errno.h>
37 #include <linux/kernel.h>
38 #include <linux/major.h>
39 #include <linux/sched.h>
40 #include <linux/slab.h>
41 #include <linux/poll.h>
42 #include <linux/fcntl.h>
43 #include <linux/init.h>
44 #include <linux/skbuff.h>
45 #include <linux/interrupt.h>
46 #include <linux/notifier.h>
47 #include <net/sock.h>
48
49 #include <asm/system.h>
50 #include <asm/uaccess.h>
51 #include <asm/unaligned.h>
52
53 #include <net/bluetooth/bluetooth.h>
54 #include <net/bluetooth/hci_core.h>
55
56 #ifndef HCI_CORE_DEBUG
57 #undef BT_DBG
58 #define BT_DBG( A... )
59 #endif
60
61 static void hci_cmd_task(unsigned long arg);
62 static void hci_rx_task(unsigned long arg);
63 static void hci_tx_task(unsigned long arg);
64 static void hci_notify(struct hci_dev *hdev, int event);
65
66 rwlock_t hci_task_lock = RW_LOCK_UNLOCKED;
67
68 /* HCI device list */
69 LIST_HEAD(hdev_list);
70 rwlock_t hdev_list_lock = RW_LOCK_UNLOCKED;
71
72 /* HCI protocols */
73 #define HCI_MAX_PROTO 2
74 struct hci_proto *hci_proto[HCI_MAX_PROTO];
75
76 /* HCI notifiers list */
77 static struct notifier_block *hci_notifier;
78
79
80 /* ---- HCI notifications ---- */
81
82 int hci_register_notifier(struct notifier_block *nb)
83 {
84 return notifier_chain_register(&hci_notifier, nb);
85 }
86
87 int hci_unregister_notifier(struct notifier_block *nb)
88 {
89 return notifier_chain_unregister(&hci_notifier, nb);
90 }
91
92 void hci_notify(struct hci_dev *hdev, int event)
93 {
94 notifier_call_chain(&hci_notifier, event, hdev);
95 }
96
97 /* ---- HCI hotplug support ---- */
98
99 #ifdef CONFIG_HOTPLUG
100
101 static int hci_run_hotplug(char *dev, char *action)
102 {
103 char *argv[3], *envp[5], dstr[20], astr[32];
104
105 sprintf(dstr, "DEVICE=%s", dev);
106 sprintf(astr, "ACTION=%s", action);
107
108 argv[0] = hotplug_path;
109 argv[1] = "bluetooth";
110 argv[2] = NULL;
111
112 envp[0] = "HOME=/";
113 envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
114 envp[2] = dstr;
115 envp[3] = astr;
116 envp[4] = NULL;
117
118 return call_usermodehelper(argv[0], argv, envp);
119 }
120 #else
121 #define hci_run_hotplug(A...)
122 #endif
123
124 /* ---- HCI requests ---- */
125
126 void hci_req_complete(struct hci_dev *hdev, int result)
127 {
128 BT_DBG("%s result 0x%2.2x", hdev->name, result);
129
130 if (hdev->req_status == HCI_REQ_PEND) {
131 hdev->req_result = result;
132 hdev->req_status = HCI_REQ_DONE;
133 wake_up_interruptible(&hdev->req_wait_q);
134 }
135 }
136
137 void hci_req_cancel(struct hci_dev *hdev, int err)
138 {
139 BT_DBG("%s err 0x%2.2x", hdev->name, err);
140
141 if (hdev->req_status == HCI_REQ_PEND) {
142 hdev->req_result = err;
143 hdev->req_status = HCI_REQ_CANCELED;
144 wake_up_interruptible(&hdev->req_wait_q);
145 }
146 }
147
148 /* Execute request and wait for completion. */
149 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), unsigned long opt, __u32 timeout)
150 {
151 DECLARE_WAITQUEUE(wait, current);
152 int err = 0;
153
154 BT_DBG("%s start", hdev->name);
155
156 hdev->req_status = HCI_REQ_PEND;
157
158 add_wait_queue(&hdev->req_wait_q, &wait);
159 set_current_state(TASK_INTERRUPTIBLE);
160
161 req(hdev, opt);
162 schedule_timeout(timeout);
163
164 set_current_state(TASK_RUNNING);
165 remove_wait_queue(&hdev->req_wait_q, &wait);
166
167 if (signal_pending(current))
168 return -EINTR;
169
170 switch (hdev->req_status) {
171 case HCI_REQ_DONE:
172 err = -bterr(hdev->req_result);
173 break;
174
175 case HCI_REQ_CANCELED:
176 err = -hdev->req_result;
177 break;
178
179 default:
180 err = -ETIMEDOUT;
181 break;
182 }
183
184 hdev->req_status = hdev->req_result = 0;
185
186 BT_DBG("%s end: err %d", hdev->name, err);
187
188 return err;
189 }
190
191 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
192 unsigned long opt, __u32 timeout)
193 {
194 int ret;
195
196 /* Serialize all requests */
197 hci_req_lock(hdev);
198 ret = __hci_request(hdev, req, opt, timeout);
199 hci_req_unlock(hdev);
200
201 return ret;
202 }
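
/*
 * The request helpers above are driven by the ioctl handlers below: the
 * caller passes a callback that queues one or more HCI commands
 * (hci_reset_req, hci_init_req, hci_scan_req, ...) and hci_request()
 * sleeps until hci_req_complete() is called from the event handler or the
 * timeout expires.  HCISETSCAN, for instance, boils down to
 * hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT).
 */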
203
204 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
205 {
206 BT_DBG("%s %ld", hdev->name, opt);
207
208 /* Reset device */
209 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
210 }
211
212 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
213 {
214 set_event_flt_cp ef;
215 __u16 param;
216
217 BT_DBG("%s %ld", hdev->name, opt);
218
219 /* Mandatory initialization */
220
221 /* Reset */
222 if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
223 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
224
225 /* Read Local Supported Features */
226 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);
227
228 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
229 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);
230
231 #if 0
232 /* Host buffer size */
233 {
234 host_buffer_size_cp bs;
235 bs.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
236 bs.sco_mtu = HCI_MAX_SCO_SIZE;
237 bs.acl_max_pkt = __cpu_to_le16(0xffff);
238 bs.sco_max_pkt = __cpu_to_le16(0xffff);
239 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE,
240 HOST_BUFFER_SIZE_CP_SIZE, &bs);
241 }
242 #endif
243
244 /* Read BD Address */
245 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);
246
247 /* Optional initialization */
248
249 /* Clear Event Filters */
250 ef.flt_type = FLT_CLEAR_ALL;
251 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, 1, &ef);
252
253 /* Page timeout ~20 secs */
254 param = __cpu_to_le16(0x8000);
255 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);
256
257 /* Connection accept timeout ~20 secs */
258 param = __cpu_to_le16(0x7d00);
259 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
260 }
261
262 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
263 {
264 __u8 scan = opt;
265
266 BT_DBG("%s %x", hdev->name, scan);
267
268 /* Inquiry and Page scans */
269 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
270 }
271
272 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
273 {
274 __u8 auth = opt;
275
276 BT_DBG("%s %x", hdev->name, auth);
277
278 /* Authentication */
279 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
280 }
281
282 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
283 {
284 __u8 encrypt = opt;
285
286 BT_DBG("%s %x", hdev->name, encrypt);
287
288 /* Encryption */
289 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
290 }
291
292 /* Get HCI device by index.
293 * A reference to the device is held on return. */
294 struct hci_dev *hci_dev_get(int index)
295 {
296 struct hci_dev *hdev;
297 struct list_head *p;
298
299 BT_DBG("%d", index);
300
301 if (index < 0)
302 return NULL;
303
304 read_lock(&hdev_list_lock);
305 list_for_each(p, &hdev_list) {
306 hdev = list_entry(p, struct hci_dev, list);
307 if (hdev->id == index) {
308 hci_dev_hold(hdev);
309 goto done;
310 }
311 }
312 hdev = NULL;
313 done:
314 read_unlock(&hdev_list_lock);
315 return hdev;
316 }
317
318 /* ---- Inquiry support ---- */
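/* The inquiry cache is a simple singly linked list of inquiry_entry
 * structures hanging off hdev->inq_cache.  Callers serialize access with
 * the device lock (hci_dev_lock_bh), as hci_inquiry() below does around
 * flush and dump. */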
319 void inquiry_cache_flush(struct hci_dev *hdev)
320 {
321 struct inquiry_cache *cache = &hdev->inq_cache;
322 struct inquiry_entry *next = cache->list, *e;
323
324 BT_DBG("cache %p", cache);
325
326 cache->list = NULL;
327 while ((e = next)) {
328 next = e->next;
329 kfree(e);
330 }
331 }
332
333 struct inquiry_entry *inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
334 {
335 struct inquiry_cache *cache = &hdev->inq_cache;
336 struct inquiry_entry *e;
337
338 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
339
340 for (e = cache->list; e; e = e->next)
341 if (!bacmp(&e->info.bdaddr, bdaddr))
342 break;
343 return e;
344 }
345
346 void inquiry_cache_update(struct hci_dev *hdev, inquiry_info *info)
347 {
348 struct inquiry_cache *cache = &hdev->inq_cache;
349 struct inquiry_entry *e;
350
351 BT_DBG("cache %p, %s", cache, batostr(&info->bdaddr));
352
353 if (!(e = inquiry_cache_lookup(hdev, &info->bdaddr))) {
354 /* Entry not in the cache. Add new one. */
355 if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
356 return;
357 memset(e, 0, sizeof(struct inquiry_entry));
358 e->next = cache->list;
359 cache->list = e;
360 }
361
362 memcpy(&e->info, info, sizeof(inquiry_info));
363 e->timestamp = jiffies;
364 cache->timestamp = jiffies;
365 }
366
367 int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
368 {
369 struct inquiry_cache *cache = &hdev->inq_cache;
370 inquiry_info *info = (inquiry_info *) buf;
371 struct inquiry_entry *e;
372 int copied = 0;
373
374 for (e = cache->list; e && copied < num; e = e->next, copied++)
375 memcpy(info++, &e->info, sizeof(inquiry_info));
376
377 BT_DBG("cache %p, copied %d", cache, copied);
378 return copied;
379 }
380
381 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
382 {
383 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
384 inquiry_cp ic;
385
386 BT_DBG("%s", hdev->name);
387
388 if (test_bit(HCI_INQUIRY, &hdev->flags))
389 return;
390
391 /* Start Inquiry */
392 memcpy(&ic.lap, &ir->lap, 3);
393 ic.length = ir->length;
394 ic.num_rsp = ir->num_rsp;
395 hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, INQUIRY_CP_SIZE, &ic);
396 }
397
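/* Handle an inquiry request from user space: flush the cache if it is
 * stale or a flush was requested, run the inquiry, then copy the updated
 * hci_inquiry_req header followed by up to num_rsp inquiry_info records
 * back to the caller. */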
398 int hci_inquiry(unsigned long arg)
399 {
400 struct hci_inquiry_req ir;
401 struct hci_dev *hdev;
402 int err = 0, do_inquiry = 0, max_rsp;
403 long timeo;
404 __u8 *buf, *ptr;
405
406 ptr = (void *) arg;
407 if (copy_from_user(&ir, ptr, sizeof(ir)))
408 return -EFAULT;
409
410 if (!(hdev = hci_dev_get(ir.dev_id)))
411 return -ENODEV;
412
413 hci_dev_lock_bh(hdev);
414 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
415 inquiry_cache_empty(hdev) ||
416 ir.flags & IREQ_CACHE_FLUSH) {
417 inquiry_cache_flush(hdev);
418 do_inquiry = 1;
419 }
420 hci_dev_unlock_bh(hdev);
421
422 timeo = ir.length * 2 * HZ;
423 if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
424 goto done;
425
426 /* For an unlimited number of responses we use a buffer with 255 entries */
427 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
428
429 /* cache_dump can't sleep. Therefore we allocate a temporary buffer and
430 * then copy it to user space.
431 */
432 if (!(buf = kmalloc(sizeof(inquiry_info) * max_rsp, GFP_KERNEL))) {
433 err = -ENOMEM;
434 goto done;
435 }
436
437 hci_dev_lock_bh(hdev);
438 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
439 hci_dev_unlock_bh(hdev);
440
441 BT_DBG("num_rsp %d", ir.num_rsp);
442
443 if (!verify_area(VERIFY_WRITE, ptr, sizeof(ir) +
444 (sizeof(inquiry_info) * ir.num_rsp))) {
445 copy_to_user(ptr, &ir, sizeof(ir));
446 ptr += sizeof(ir);
447 copy_to_user(ptr, buf, sizeof(inquiry_info) * ir.num_rsp);
448 } else
449 err = -EFAULT;
450
451 kfree(buf);
452
453 done:
454 hci_dev_put(hdev);
455 return err;
456 }
457
458 /* ---- HCI ioctl helpers ---- */
459
460 int hci_dev_open(__u16 dev)
461 {
462 struct hci_dev *hdev;
463 int ret = 0;
464
465 if (!(hdev = hci_dev_get(dev)))
466 return -ENODEV;
467
468 BT_DBG("%s %p", hdev->name, hdev);
469
470 hci_req_lock(hdev);
471
472 if (test_bit(HCI_UP, &hdev->flags)) {
473 ret = -EALREADY;
474 goto done;
475 }
476
477 if (hdev->open(hdev)) {
478 ret = -EIO;
479 goto done;
480 }
481
482 if (!test_bit(HCI_RAW, &hdev->flags)) {
483 atomic_set(&hdev->cmd_cnt, 1);
484 set_bit(HCI_INIT, &hdev->flags);
485
486 //__hci_request(hdev, hci_reset_req, 0, HZ);
487 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
488
489 clear_bit(HCI_INIT, &hdev->flags);
490 }
491
492 if (!ret) {
493 set_bit(HCI_UP, &hdev->flags);
494 hci_notify(hdev, HCI_DEV_UP);
495 } else {
496 /* Init failed, cleanup */
497 tasklet_kill(&hdev->rx_task);
498 tasklet_kill(&hdev->tx_task);
499 tasklet_kill(&hdev->cmd_task);
500
501 skb_queue_purge(&hdev->cmd_q);
502 skb_queue_purge(&hdev->rx_q);
503
504 if (hdev->flush)
505 hdev->flush(hdev);
506
507 if (hdev->sent_cmd) {
508 kfree_skb(hdev->sent_cmd);
509 hdev->sent_cmd = NULL;
510 }
511
512 hdev->close(hdev);
513 hdev->flags = 0;
514 }
515
516 done:
517 hci_req_unlock(hdev);
518 hci_dev_put(hdev);
519 return ret;
520 }
521
522 static int hci_dev_do_close(struct hci_dev *hdev)
523 {
524 BT_DBG("%s %p", hdev->name, hdev);
525
526 hci_req_cancel(hdev, ENODEV);
527 hci_req_lock(hdev);
528
529 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
530 hci_req_unlock(hdev);
531 return 0;
532 }
533
534 /* Kill RX and TX tasks */
535 tasklet_kill(&hdev->rx_task);
536 tasklet_kill(&hdev->tx_task);
537
538 hci_dev_lock_bh(hdev);
539 inquiry_cache_flush(hdev);
540 hci_conn_hash_flush(hdev);
541 hci_dev_unlock_bh(hdev);
542
543 hci_notify(hdev, HCI_DEV_DOWN);
544
545 if (hdev->flush)
546 hdev->flush(hdev);
547
548 /* Reset device */
549 skb_queue_purge(&hdev->cmd_q);
550 atomic_set(&hdev->cmd_cnt, 1);
551 set_bit(HCI_INIT, &hdev->flags);
552 __hci_request(hdev, hci_reset_req, 0, HZ/4);
553 clear_bit(HCI_INIT, &hdev->flags);
554
555 /* Kill cmd task */
556 tasklet_kill(&hdev->cmd_task);
557
558 /* Drop queues */
559 skb_queue_purge(&hdev->rx_q);
560 skb_queue_purge(&hdev->cmd_q);
561 skb_queue_purge(&hdev->raw_q);
562
563 /* Drop last sent command */
564 if (hdev->sent_cmd) {
565 kfree_skb(hdev->sent_cmd);
566 hdev->sent_cmd = NULL;
567 }
568
569 /* After this point our queues are empty
570 * and no tasks are scheduled. */
571 hdev->close(hdev);
572
573 /* Clear flags */
574 hdev->flags = 0;
575
576 hci_req_unlock(hdev);
577 return 0;
578 }
579
580 int hci_dev_close(__u16 dev)
581 {
582 struct hci_dev *hdev;
583 int err;
584
585 if (!(hdev = hci_dev_get(dev)))
586 return -ENODEV;
587 err = hci_dev_do_close(hdev);
588 hci_dev_put(hdev);
589 return err;
590 }
591
592 int hci_dev_reset(__u16 dev)
593 {
594 struct hci_dev *hdev;
595 int ret = 0;
596
597 if (!(hdev = hci_dev_get(dev)))
598 return -ENODEV;
599
600 hci_req_lock(hdev);
601 tasklet_disable(&hdev->tx_task);
602
603 if (!test_bit(HCI_UP, &hdev->flags))
604 goto done;
605
606 /* Drop queues */
607 skb_queue_purge(&hdev->rx_q);
608 skb_queue_purge(&hdev->cmd_q);
609
610 hci_dev_lock_bh(hdev);
611 inquiry_cache_flush(hdev);
612 hci_conn_hash_flush(hdev);
613 hci_dev_unlock_bh(hdev);
614
615 if (hdev->flush)
616 hdev->flush(hdev);
617
618 atomic_set(&hdev->cmd_cnt, 1);
619 hdev->acl_cnt = 0; hdev->sco_cnt = 0;
620
621 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
622
623 done:
624 tasklet_enable(&hdev->tx_task);
625 hci_req_unlock(hdev);
626 hci_dev_put(hdev);
627 return ret;
628 }
629
630 int hci_dev_reset_stat(__u16 dev)
631 {
632 struct hci_dev *hdev;
633 int ret = 0;
634
635 if (!(hdev = hci_dev_get(dev)))
636 return -ENODEV;
637
638 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
639
640 hci_dev_put(hdev);
641
642 return ret;
643 }
644
645 int hci_dev_cmd(unsigned int cmd, unsigned long arg)
646 {
647 struct hci_dev *hdev;
648 struct hci_dev_req dr;
649 int err = 0;
650
651 if (copy_from_user(&dr, (void *) arg, sizeof(dr)))
652 return -EFAULT;
653
654 if (!(hdev = hci_dev_get(dr.dev_id)))
655 return -ENODEV;
656
657 switch (cmd) {
658 case HCISETAUTH:
659 err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
660 break;
661
662 case HCISETENCRYPT:
663 if (!lmp_encrypt_capable(hdev)) {
664 err = -EOPNOTSUPP;
665 break;
666 }
667
668 if (!test_bit(HCI_AUTH, &hdev->flags)) {
669 /* Auth must be enabled first */
670 err = hci_request(hdev, hci_auth_req,
671 dr.dev_opt, HCI_INIT_TIMEOUT);
672 if (err)
673 break;
674 }
675
676 err = hci_request(hdev, hci_encrypt_req,
677 dr.dev_opt, HCI_INIT_TIMEOUT);
678 break;
679
680 case HCISETSCAN:
681 err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
682 break;
683
684 case HCISETPTYPE:
685 hdev->pkt_type = (__u16) dr.dev_opt;
686 break;
687
688 case HCISETLINKPOL:
689 hdev->link_policy = (__u16) dr.dev_opt;
690 break;
691
692 case HCISETLINKMODE:
693 hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
694 break;
695
696 case HCISETACLMTU:
697 hdev->acl_mtu = *((__u16 *)&dr.dev_opt + 1);
698 hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
699 break;
700
701 case HCISETSCOMTU:
702 hdev->sco_mtu = *((__u16 *)&dr.dev_opt + 1);
703 hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
704 break;
705
706 default:
707 err = -EINVAL;
708 break;
709 }
710 hci_dev_put(hdev);
711 return err;
712 }
713
714 int hci_get_dev_list(unsigned long arg)
715 {
716 struct hci_dev_list_req *dl;
717 struct hci_dev_req *dr;
718 struct list_head *p;
719 int n = 0, size, err;
720 __u16 dev_num;
721
722 if (get_user(dev_num, (__u16 *) arg))
723 return -EFAULT;
724
725 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
726 return -EINVAL;
727
728 size = sizeof(*dl) + dev_num * sizeof(*dr);
729
730 if (!(dl = kmalloc(size, GFP_KERNEL)))
731 return -ENOMEM;
732
733 dr = dl->dev_req;
734
735 read_lock_bh(&hdev_list_lock);
736 list_for_each(p, &hdev_list) {
737 struct hci_dev *hdev;
738 hdev = list_entry(p, struct hci_dev, list);
739 (dr + n)->dev_id = hdev->id;
740 (dr + n)->dev_opt = hdev->flags;
741 if (++n >= dev_num)
742 break;
743 }
744 read_unlock_bh(&hdev_list_lock);
745
746 dl->dev_num = n;
747 size = sizeof(*dl) + n * sizeof(*dr);
748
749 err = copy_to_user((void *) arg, dl, size);
750 kfree(dl);
751
752 return err ? -EFAULT : 0;
753 }
754
755 int hci_get_dev_info(unsigned long arg)
756 {
757 struct hci_dev *hdev;
758 struct hci_dev_info di;
759 int err = 0;
760
761 if (copy_from_user(&di, (void *) arg, sizeof(di)))
762 return -EFAULT;
763
764 if (!(hdev = hci_dev_get(di.dev_id)))
765 return -ENODEV;
766
767 strcpy(di.name, hdev->name);
768 di.bdaddr = hdev->bdaddr;
769 di.type = hdev->type;
770 di.flags = hdev->flags;
771 di.pkt_type = hdev->pkt_type;
772 di.acl_mtu = hdev->acl_mtu;
773 di.acl_pkts = hdev->acl_pkts;
774 di.sco_mtu = hdev->sco_mtu;
775 di.sco_pkts = hdev->sco_pkts;
776 di.link_policy = hdev->link_policy;
777 di.link_mode = hdev->link_mode;
778
779 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
780 memcpy(&di.features, &hdev->features, sizeof(di.features));
781
782 if (copy_to_user((void *) arg, &di, sizeof(di)))
783 err = -EFAULT;
784
785 hci_dev_put(hdev);
786
787 return err;
788 }
789
790
791 /* ---- Interface to HCI drivers ---- */
792
793 /* Register HCI device */
794 int hci_register_dev(struct hci_dev *hdev)
795 {
796 struct list_head *head = &hdev_list, *p;
797 int id = 0;
798
799 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
800
801 if (!hdev->open || !hdev->close || !hdev->destruct)
802 return -EINVAL;
803
804 write_lock_bh(&hdev_list_lock);
805
806 /* Find first available device id */
807 list_for_each(p, &hdev_list) {
808 if (list_entry(p, struct hci_dev, list)->id != id)
809 break;
810 head = p; id++;
811 }
812
813 sprintf(hdev->name, "hci%d", id);
814 hdev->id = id;
815 list_add(&hdev->list, head);
816
817 atomic_set(&hdev->refcnt, 1);
818 spin_lock_init(&hdev->lock);
819
820 hdev->flags = 0;
821 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
822 hdev->link_mode = (HCI_LM_ACCEPT);
823
824 tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
825 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
826 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
827
828 skb_queue_head_init(&hdev->rx_q);
829 skb_queue_head_init(&hdev->cmd_q);
830 skb_queue_head_init(&hdev->raw_q);
831
832 init_waitqueue_head(&hdev->req_wait_q);
833 init_MUTEX(&hdev->req_lock);
834
835 inquiry_cache_init(hdev);
836
837 conn_hash_init(hdev);
838
839 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
840
841 atomic_set(&hdev->promisc, 0);
842
843 MOD_INC_USE_COUNT;
844
845 write_unlock_bh(&hdev_list_lock);
846
847 hci_notify(hdev, HCI_DEV_REG);
848 hci_run_hotplug(hdev->name, "register");
849
850 return id;
851 }
852
853 /* Unregister HCI device */
854 int hci_unregister_dev(struct hci_dev *hdev)
855 {
856 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
857
858 write_lock_bh(&hdev_list_lock);
859 list_del(&hdev->list);
860 write_unlock_bh(&hdev_list_lock);
861
862 hci_dev_do_close(hdev);
863
864 hci_notify(hdev, HCI_DEV_UNREG);
865 hci_run_hotplug(hdev->name, "unregister");
866
867 hci_dev_put(hdev);
868
869 MOD_DEC_USE_COUNT;
870 return 0;
871 }
872
873 /* Suspend HCI device */
874 int hci_suspend_dev(struct hci_dev *hdev)
875 {
876 hci_notify(hdev, HCI_DEV_SUSPEND);
877 hci_run_hotplug(hdev->name, "suspend");
878 return 0;
879 }
880
881 /* Resume HCI device */
882 int hci_resume_dev(struct hci_dev *hdev)
883 {
884 hci_notify(hdev, HCI_DEV_RESUME);
885 hci_run_hotplug(hdev->name, "resume");
886 return 0;
887 }
888
889 /* Receive frame from HCI drivers */
890 int hci_recv_frame(struct sk_buff *skb)
891 {
892 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
893
894 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
895 !test_bit(HCI_INIT, &hdev->flags)) ) {
896 kfree_skb(skb);
897 return -1;
898 }
899
900 BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);
901
902 /* Incoming skb */
903 bluez_cb(skb)->incomming = 1;
904
905 /* Time stamp */
906 do_gettimeofday(&skb->stamp);
907
908 /* Queue frame for rx task */
909 skb_queue_tail(&hdev->rx_q, skb);
910 hci_sched_rx(hdev);
911 return 0;
912 }
913
914 /* ---- Interface to upper protocols ---- */
915
916 /* Register/Unregister protocols.
917 * hci_task_lock is used to ensure that no tasks are running. */
918 int hci_register_proto(struct hci_proto *hp)
919 {
920 int err = 0;
921
922 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
923
924 if (hp->id >= HCI_MAX_PROTO)
925 return -EINVAL;
926
927 write_lock_bh(&hci_task_lock);
928
929 if (!hci_proto[hp->id])
930 hci_proto[hp->id] = hp;
931 else
932 err = -EEXIST;
933
934 write_unlock_bh(&hci_task_lock);
935
936 return err;
937 }
938
939 int hci_unregister_proto(struct hci_proto *hp)
940 {
941 int err = 0;
942
943 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
944
945 if (hp->id >= HCI_MAX_PROTO)
946 return -EINVAL;
947
948 write_lock_bh(&hci_task_lock);
949
950 if (hci_proto[hp->id])
951 hci_proto[hp->id] = NULL;
952 else
953 err = -ENOENT;
954
955 write_unlock_bh(&hci_task_lock);
956
957 return err;
958 }
959
960 static int hci_send_frame(struct sk_buff *skb)
961 {
962 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
963
964 if (!hdev) {
965 kfree_skb(skb);
966 return -ENODEV;
967 }
968
969 BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);
970
971 if (atomic_read(&hdev->promisc)) {
972 /* Time stamp */
973 do_gettimeofday(&skb->stamp);
974
975 hci_send_to_sock(hdev, skb);
976 }
977
978 /* Get rid of skb owner, prior to sending to the driver. */
979 skb_orphan(skb);
980
981 return hdev->send(skb);
982 }
983
984 /* Send HCI command */
985 int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
986 {
987 int len = HCI_COMMAND_HDR_SIZE + plen;
988 hci_command_hdr *hc;
989 struct sk_buff *skb;
990
991 BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);
992
993 if (!(skb = bluez_skb_alloc(len, GFP_ATOMIC))) {
994 BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
995 return -ENOMEM;
996 }
997
998 hc = (hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
999 hc->opcode = __cpu_to_le16(cmd_opcode_pack(ogf, ocf));
1000 hc->plen = plen;
1001
1002 if (plen)
1003 memcpy(skb_put(skb, plen), param, plen);
1004
1005 BT_DBG("skb len %d", skb->len);
1006
1007 skb->pkt_type = HCI_COMMAND_PKT;
1008 skb->dev = (void *) hdev;
1009 skb_queue_tail(&hdev->cmd_q, skb);
1010 hci_sched_cmd(hdev);
1011
1012 return 0;
1013 }
1014
1015 /* Get data from the previously sent command */
1016 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
1017 {
1018 hci_command_hdr *hc;
1019
1020 if (!hdev->sent_cmd)
1021 return NULL;
1022
1023 hc = (void *) hdev->sent_cmd->data;
1024
1025 if (hc->opcode != __cpu_to_le16(cmd_opcode_pack(ogf, ocf)))
1026 return NULL;
1027
1028 BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);
1029
1030 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1031 }
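
/* hdev->sent_cmd, which backs hci_sent_cmd_data(), is set in
 * hci_cmd_task() below; it lets the event handler recover the parameters
 * of the command that a Command Complete event refers to. */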
1032
1033 /* Send ACL data */
1034 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1035 {
1036 int len = skb->len;
1037 hci_acl_hdr *ah;
1038
1039 ah = (hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
1040 ah->handle = __cpu_to_le16(acl_handle_pack(handle, flags));
1041 ah->dlen = __cpu_to_le16(len);
1042
1043 skb->h.raw = (void *) ah;
1044 }
1045
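/* hci_send_acl() accepts either a single frame or a frame whose skb
 * frag_list carries continuation fragments: the head is tagged ACL_START
 * and every fragment is re-queued with ACL_CONT, so the whole sequence
 * reaches the TX scheduler in order. */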
1046 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1047 {
1048 struct hci_dev *hdev = conn->hdev;
1049 struct sk_buff *list;
1050
1051 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1052
1053 skb->dev = (void *) hdev;
1054 skb->pkt_type = HCI_ACLDATA_PKT;
1055 hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1056
1057 if (!(list = skb_shinfo(skb)->frag_list)) {
1058 /* Non fragmented */
1059 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1060
1061 skb_queue_tail(&conn->data_q, skb);
1062 } else {
1063 /* Fragmented */
1064 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1065
1066 skb_shinfo(skb)->frag_list = NULL;
1067
1068 /* Queue all fragments atomically */
1069 spin_lock_bh(&conn->data_q.lock);
1070
1071 __skb_queue_tail(&conn->data_q, skb);
1072 do {
1073 skb = list; list = list->next;
1074
1075 skb->dev = (void *) hdev;
1076 skb->pkt_type = HCI_ACLDATA_PKT;
1077 hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1078
1079 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1080
1081 __skb_queue_tail(&conn->data_q, skb);
1082 } while (list);
1083
1084 spin_unlock_bh(&conn->data_q.lock);
1085 }
1086
1087 hci_sched_tx(hdev);
1088 return 0;
1089 }
1090
1091 /* Send SCO data */
1092 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1093 {
1094 struct hci_dev *hdev = conn->hdev;
1095 hci_sco_hdr hs;
1096
1097 BT_DBG("%s len %d", hdev->name, skb->len);
1098
1099 if (skb->len > hdev->sco_mtu) {
1100 kfree_skb(skb);
1101 return -EINVAL;
1102 }
1103
1104 hs.handle = __cpu_to_le16(conn->handle);
1105 hs.dlen = skb->len;
1106
1107 skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
1108 memcpy(skb->h.raw, &hs, HCI_SCO_HDR_SIZE);
1109
1110 skb->dev = (void *) hdev;
1111 skb->pkt_type = HCI_SCODATA_PKT;
1112 skb_queue_tail(&conn->data_q, skb);
1113 hci_sched_tx(hdev);
1114 return 0;
1115 }
1116
1117 /* ---- HCI TX task (outgoing data) ---- */
1118
1119 /* HCI Connection scheduler */
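/* Pick the connection of the given type with the least data outstanding
 * and give it a quota of roughly (free controller buffers / number of
 * ready connections) packets, so a busy link cannot starve the others. */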
1120 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1121 {
1122 struct conn_hash *h = &hdev->conn_hash;
1123 struct hci_conn *conn = NULL;
1124 int num = 0, min = ~0;
1125 struct list_head *p;
1126
1127 /* We don't have to lock device here. Connections are always
1128 * added and removed with TX task disabled. */
1129 list_for_each(p, &h->list) {
1130 struct hci_conn *c;
1131 c = list_entry(p, struct hci_conn, list);
1132
1133 if (c->type != type || c->state != BT_CONNECTED
1134 || skb_queue_empty(&c->data_q))
1135 continue;
1136 num++;
1137
1138 if (c->sent < min) {
1139 min = c->sent;
1140 conn = c;
1141 }
1142 }
1143
1144 if (conn) {
1145 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1146 int q = cnt / num;
1147 *quote = q ? q : 1;
1148 } else
1149 *quote = 0;
1150
1151 BT_DBG("conn %p quote %d", conn, *quote);
1152 return conn;
1153 }
1154
1155 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1156 {
1157 struct conn_hash *h = &hdev->conn_hash;
1158 struct list_head *p;
1159 struct hci_conn *c;
1160
1161 BT_ERR("%s ACL tx timeout", hdev->name);
1162
1163 /* Kill stalled connections */
1164 list_for_each(p, &h->list) {
1165 c = list_entry(p, struct hci_conn, list);
1166 if (c->type == ACL_LINK && c->sent) {
1167 BT_ERR("%s killing stalled ACL connection %s",
1168 hdev->name, batostr(&c->dst));
1169 hci_acl_disconn(c, 0x13);
1170 }
1171 }
1172 }
1173
1174 static inline void hci_sched_acl(struct hci_dev *hdev)
1175 {
1176 struct hci_conn *conn;
1177 struct sk_buff *skb;
1178 int quote;
1179
1180 BT_DBG("%s", hdev->name);
1181
1182 /* ACL tx timeout must be longer than maximum
1183 * link supervision timeout (40.9 seconds) */
1184 if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
1185 hci_acl_tx_to(hdev);
1186
1187 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1188 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1189 BT_DBG("skb %p len %d", skb, skb->len);
1190 hci_send_frame(skb);
1191 hdev->acl_last_tx = jiffies;
1192
1193 hdev->acl_cnt--;
1194 conn->sent++;
1195 }
1196 }
1197 }
1198
1199 /* Schedule SCO */
1200 static inline void hci_sched_sco(struct hci_dev *hdev)
1201 {
1202 struct hci_conn *conn;
1203 struct sk_buff *skb;
1204 int quote;
1205
1206 BT_DBG("%s", hdev->name);
1207
1208 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1209 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1210 BT_DBG("skb %p len %d", skb, skb->len);
1211 hci_send_frame(skb);
1212
1213 conn->sent++;
1214 if (conn->sent == ~0)
1215 conn->sent = 0;
1216 }
1217 }
1218 }
1219
1220 static void hci_tx_task(unsigned long arg)
1221 {
1222 struct hci_dev *hdev = (struct hci_dev *) arg;
1223 struct sk_buff *skb;
1224
1225 read_lock(&hci_task_lock);
1226
1227 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1228
1229 /* Schedule queues and send stuff to HCI driver */
1230
1231 hci_sched_acl(hdev);
1232
1233 hci_sched_sco(hdev);
1234
1235 /* Send next queued raw (unknown type) packet */
1236 while ((skb = skb_dequeue(&hdev->raw_q)))
1237 hci_send_frame(skb);
1238
1239 read_unlock(&hci_task_lock);
1240 }
1241
1242
1243 /* ----- HCI RX task (incoming data processing) ----- */
1244
1245 /* ACL data packet */
1246 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1247 {
1248 hci_acl_hdr *ah = (void *) skb->data;
1249 struct hci_conn *conn;
1250 __u16 handle, flags;
1251
1252 skb_pull(skb, HCI_ACL_HDR_SIZE);
1253
1254 handle = __le16_to_cpu(ah->handle);
1255 flags = acl_flags(handle);
1256 handle = acl_handle(handle);
1257
1258 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1259
1260 hdev->stat.acl_rx++;
1261
1262 hci_dev_lock(hdev);
1263 conn = conn_hash_lookup_handle(hdev, handle);
1264 hci_dev_unlock(hdev);
1265
1266 if (conn) {
1267 register struct hci_proto *hp;
1268
1269 /* Send to upper protocol */
1270 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1271 hp->recv_acldata(conn, skb, flags);
1272 return;
1273 }
1274 } else {
1275 BT_ERR("%s ACL packet for unknown connection handle %d",
1276 hdev->name, handle);
1277 }
1278
1279 kfree_skb(skb);
1280 }
1281
1282 /* SCO data packet */
1283 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1284 {
1285 hci_sco_hdr *sh = (void *) skb->data;
1286 struct hci_conn *conn;
1287 __u16 handle;
1288
1289 skb_pull(skb, HCI_SCO_HDR_SIZE);
1290
1291 handle = __le16_to_cpu(sh->handle);
1292
1293 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1294
1295 hdev->stat.sco_rx++;
1296
1297 hci_dev_lock(hdev);
1298 conn = conn_hash_lookup_handle(hdev, handle);
1299 hci_dev_unlock(hdev);
1300
1301 if (conn) {
1302 register struct hci_proto *hp;
1303
1304 /* Send to upper protocol */
1305 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1306 hp->recv_scodata(conn, skb);
1307 return;
1308 }
1309 } else {
1310 BT_ERR("%s SCO packet for unknown connection handle %d",
1311 hdev->name, handle);
1312 }
1313
1314 kfree_skb(skb);
1315 }
1316
1317 void hci_rx_task(unsigned long arg)
1318 {
1319 struct hci_dev *hdev = (struct hci_dev *) arg;
1320 struct sk_buff *skb;
1321
1322 BT_DBG("%s", hdev->name);
1323
1324 read_lock(&hci_task_lock);
1325
1326 while ((skb = skb_dequeue(&hdev->rx_q))) {
1327 if (atomic_read(&hdev->promisc)) {
1328 /* Send copy to the sockets */
1329 hci_send_to_sock(hdev, skb);
1330 }
1331
1332 if (test_bit(HCI_RAW, &hdev->flags)) {
1333 kfree_skb(skb);
1334 continue;
1335 }
1336
1337 if (test_bit(HCI_INIT, &hdev->flags)) {
1338 /* Don't process data packets in this state. */
1339 switch (skb->pkt_type) {
1340 case HCI_ACLDATA_PKT:
1341 case HCI_SCODATA_PKT:
1342 kfree_skb(skb);
1343 continue;
1344 }
1345 }
1346
1347 /* Process frame */
1348 switch (skb->pkt_type) {
1349 case HCI_EVENT_PKT:
1350 hci_event_packet(hdev, skb);
1351 break;
1352
1353 case HCI_ACLDATA_PKT:
1354 BT_DBG("%s ACL data packet", hdev->name);
1355 hci_acldata_packet(hdev, skb);
1356 break;
1357
1358 case HCI_SCODATA_PKT:
1359 BT_DBG("%s SCO data packet", hdev->name);
1360 hci_scodata_packet(hdev, skb);
1361 break;
1362
1363 default:
1364 kfree_skb(skb);
1365 break;
1366 }
1367 }
1368
1369 read_unlock(&hci_task_lock);
1370 }
1371
1372 static void hci_cmd_task(unsigned long arg)
1373 {
1374 struct hci_dev *hdev = (struct hci_dev *) arg;
1375 struct sk_buff *skb;
1376
1377 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1378
1379 if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
1380 BT_ERR("%s command tx timeout", hdev->name);
1381 atomic_set(&hdev->cmd_cnt, 1);
1382 }
1383
1384 /* Send queued commands */
1385 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1386 if (hdev->sent_cmd)
1387 kfree_skb(hdev->sent_cmd);
1388
1389 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1390 atomic_dec(&hdev->cmd_cnt);
1391 hci_send_frame(skb);
1392 hdev->cmd_last_tx = jiffies;
1393 } else {
1394 skb_queue_head(&hdev->cmd_q, skb);
1395 hci_sched_cmd(hdev);
1396 }
1397 }
1398 }
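
/* Command flow control: cmd_cnt is the window of commands we may have
 * outstanding.  It is consumed here when a command is sent and refilled
 * when the controller reports free command slots in its Command
 * Complete/Status events (see hci_event.c); the one second check above
 * recovers the queue if such an event is lost. */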
1399
1400 /* ---- Initialization ---- */
1401
1402 int hci_core_init(void)
1403 {
1404 return 0;
1405 }
1406
1407 int hci_core_cleanup(void)
1408 {
1409 return 0;
1410 }
1411