1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/slab.h>
34 #include <linux/poll.h>
35 #include <linux/fcntl.h>
36 #include <linux/init.h>
37 #include <linux/skbuff.h>
38 #include <linux/workqueue.h>
39 #include <linux/interrupt.h>
40 #include <linux/compat.h>
41 #include <linux/socket.h>
42 #include <linux/ioctl.h>
43 #include <net/sock.h>
44
45 #include <linux/uaccess.h>
46 #include <asm/unaligned.h>
47
48 #include <net/bluetooth/bluetooth.h>
49 #include <net/bluetooth/hci_core.h>
50 #include <net/bluetooth/hci_mon.h>
51
52 static atomic_t monitor_promisc = ATOMIC_INIT(0);
53
54 /* ----- HCI socket interface ----- */
55
hci_test_bit(int nr,void * addr)56 static inline int hci_test_bit(int nr, void *addr)
57 {
58 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
59 }
60
61 /* Security filter */
62 static struct hci_sec_filter hci_sec_filter = {
63 /* Packet types */
64 0x10,
65 /* Events */
66 { 0x1000d9fe, 0x0000b00c },
67 /* Commands */
68 {
69 { 0x0 },
70 /* OGF_LINK_CTL */
71 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
72 /* OGF_LINK_POLICY */
73 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
74 /* OGF_HOST_CTL */
75 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
76 /* OGF_INFO_PARAM */
77 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
78 /* OGF_STATUS_PARAM */
79 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
80 }
81 };
82
/* List of all open HCI sockets; traversals take the embedded rwlock. */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
86
/* Send frame to RAW socket.
 *
 * Deliver @skb (belonging to @hdev) to every bound raw HCI socket whose
 * filter accepts it.  A private copy with the packet type byte pushed in
 * front is created lazily on the first match and then cloned per receiver,
 * so nothing is allocated when no socket is interested.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		/* Only sockets bound to this device are interested */
		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		/* Apply filter; vendor packets map to type-mask bit 0 */
		flt = &hci_pi(sk)->filter;

		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
				0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			/* With an opcode filter set, Command Complete and
			 * Command Status events must carry that opcode
			 * (at offset 3 resp. 4 within the event).
			 */
			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* On queueing failure the clone is still ours to free */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* Release the lazily created private copy (no-op if NULL) */
	kfree_skb(skb_copy);
}
157
158 /* Send frame to control socket */
hci_send_to_control(struct sk_buff * skb,struct sock * skip_sk)159 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
160 {
161 struct sock *sk;
162 struct hlist_node *node;
163
164 BT_DBG("len %d", skb->len);
165
166 read_lock(&hci_sk_list.lock);
167
168 sk_for_each(sk, node, &hci_sk_list.head) {
169 struct sk_buff *nskb;
170
171 /* Skip the original socket */
172 if (sk == skip_sk)
173 continue;
174
175 if (sk->sk_state != BT_BOUND)
176 continue;
177
178 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
179 continue;
180
181 nskb = skb_clone(skb, GFP_ATOMIC);
182 if (!nskb)
183 continue;
184
185 if (sock_queue_rcv_skb(sk, nskb))
186 kfree_skb(nskb);
187 }
188
189 read_unlock(&hci_sk_list.lock);
190 }
191
/* Send frame to monitor socket.
 *
 * Prefix @skb with a monitor header (opcode derived from packet type and
 * direction) and deliver a clone to every bound monitor socket.  The
 * wrapped copy is created lazily once the first listener is found.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	/* Fast path: nobody is monitoring */
	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and direction for data packets) to the
	 * monitor protocol opcode.
	 */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		/* Unknown packet types are not forwarded to monitors */
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* Release the lazily created wrapped copy (no-op if NULL) */
	kfree_skb(skb_copy);
}
266
send_monitor_event(struct sk_buff * skb)267 static void send_monitor_event(struct sk_buff *skb)
268 {
269 struct sock *sk;
270 struct hlist_node *node;
271
272 BT_DBG("len %d", skb->len);
273
274 read_lock(&hci_sk_list.lock);
275
276 sk_for_each(sk, node, &hci_sk_list.head) {
277 struct sk_buff *nskb;
278
279 if (sk->sk_state != BT_BOUND)
280 continue;
281
282 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
283 continue;
284
285 nskb = skb_clone(skb, GFP_ATOMIC);
286 if (!nskb)
287 continue;
288
289 if (sock_queue_rcv_skb(sk, nskb))
290 kfree_skb(nskb);
291 }
292
293 read_unlock(&hci_sk_list.lock);
294 }
295
/* Build a monitor protocol event skb announcing device registration or
 * unregistration for @hdev.
 *
 * Returns a timestamped skb consisting of a hci_mon_hdr followed by the
 * event payload (hci_mon_new_index for HCI_DEV_REG, empty for
 * HCI_DEV_UNREG), or NULL on allocation failure or unknown @event.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		/* Describe the new controller to the monitor */
		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		/* Removal carries no payload beyond the header */
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the monitor header; len covers the payload only */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
339
send_monitor_replay(struct sock * sk)340 static void send_monitor_replay(struct sock *sk)
341 {
342 struct hci_dev *hdev;
343
344 read_lock(&hci_dev_list_lock);
345
346 list_for_each_entry(hdev, &hci_dev_list, list) {
347 struct sk_buff *skb;
348
349 skb = create_monitor_event(hdev, HCI_DEV_REG);
350 if (!skb)
351 continue;
352
353 if (sock_queue_rcv_skb(sk, skb))
354 kfree_skb(skb);
355 }
356
357 read_unlock(&hci_dev_list_lock);
358 }
359
/* Generate internal stack event.
 *
 * Build a synthetic HCI_EV_STACK_INTERNAL event carrying @dlen bytes of
 * @data (typed by @type) and deliver it to raw sockets via
 * hci_send_to_sock().  @hdev may be NULL for device-independent events.
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	/* Standard HCI event header wrapping the stack-internal event */
	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so direction-aware filters/cmsg see it as RX */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
387
/* Notify sockets about a device state change (@event, e.g. HCI_DEV_REG,
 * HCI_DEV_UNREG): forward it to monitor sockets, raise a stack-internal
 * event for raw sockets, and on unregistration detach every socket that
 * was bound to @hdev.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			/* Nested class: the list lock is already held */
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				/* Drop the binding and wake the owner with
				 * EPIPE; the bind() reference is released.
				 */
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
431
/* Release an HCI socket: undo monitor/device promiscuity accounting,
 * unlink from the global socket list, drop the device reference taken at
 * bind time, and free queued skbs before putting the sock.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	/* Monitor sockets contributed to the global promiscuity count */
	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		/* Undo the per-device accounting done in hci_sock_bind() */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
462
/* HCIBLOCKADDR helper: copy a bdaddr from userspace and add it to the
 * device blacklist under the device lock.  Returns 0 or a negative errno.
 */
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);
	err = hci_blacklist_add(hdev, &bdaddr, 0);
	hci_dev_unlock(hdev);

	return err;
}
479
/* HCIUNBLOCKADDR helper: copy a bdaddr from userspace and remove it from
 * the device blacklist under the device lock.  Returns 0 or a negative
 * errno.
 */
static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);
	err = hci_blacklist_del(hdev, &bdaddr, 0);
	hci_dev_unlock(hdev);

	return err;
}
496
/* Ioctls that require bound socket.
 *
 * Called with the socket lock held; @sk must be bound to a device or
 * -EBADFD is returned.  Unknown commands fall through to the driver's
 * own ioctl handler if one is registered.
 */
static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		/* Toggling raw mode requires admin privileges */
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		/* Quirky devices are permanently raw; refuse changes */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		/* Let the driver handle vendor-specific ioctls */
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}
542
/* Top-level ioctl handler for HCI sockets.
 *
 * Read-only queries are allowed for everyone; device state changes
 * require CAP_NET_ADMIN.  Anything not handled here is forwarded to
 * hci_sock_bound_ioctl() under the socket lock.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	/* All device configuration commands share one privileged path */
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		/* Remaining ioctls need a bound socket and the sock lock */
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}
603
/* Bind an HCI socket to a channel (raw/control/monitor) and, for raw
 * sockets, optionally to a specific controller.
 *
 * Short sockaddrs are accepted: the address is copied into a zeroed
 * local struct so missing fields read as 0/HCI_DEV_NONE.  On success the
 * socket moves to BT_BOUND; binding twice returns -EALREADY.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); unset trailing fields stay zero */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE binds to all devices (hdev stays NULL) */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			/* Reference dropped in release/dev-unreg paths */
			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		/* Control channel is device-independent and privileged */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		/* Monitor channel is device-independent and read-only */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* Replay existing controllers to the new listener */
		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}


	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
692
hci_sock_getname(struct socket * sock,struct sockaddr * addr,int * addr_len,int peer)693 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
694 {
695 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
696 struct sock *sk = sock->sk;
697 struct hci_dev *hdev = hci_pi(sk)->hdev;
698
699 BT_DBG("sock %p sk %p", sock, sk);
700
701 if (!hdev)
702 return -EBADFD;
703
704 lock_sock(sk);
705
706 *addr_len = sizeof(*haddr);
707 haddr->hci_family = AF_BLUETOOTH;
708 haddr->hci_dev = hdev->id;
709 haddr->hci_channel= 0;
710
711 release_sock(sk);
712 return 0;
713 }
714
/* Attach ancillary data (direction and/or timestamp) to a received
 * message, according to the socket's cmsg_mask set via setsockopt.
 */
static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		/* 1 for incoming frames, 0 for outgoing */
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks expect a compat_timeval layout */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
749
/* recvmsg handler: dequeue one frame, copy up to @len bytes to the user
 * iovec (setting MSG_TRUNC on short reads) and attach per-channel
 * ancillary data.  Returns bytes copied or a negative errno.
 */
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* A closed socket signals EOF, not an error */
	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* Truncate to the caller's buffer, flagging the loss */
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	/* Ancillary data depends on which channel the socket is bound to */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
793
/* sendmsg handler.
 *
 * Raw channel: the first byte of the payload is the HCI packet type,
 * followed by the frame.  Commands are checked against the security
 * filter unless the sender has CAP_NET_RAW, then queued on the command
 * queue (or the raw queue for raw-mode devices and vendor OGF 0x3f);
 * all other packet types require CAP_NET_RAW and go to the raw queue.
 * Control channel frames are handed to mgmt_control(); the monitor
 * channel is read-only.
 */
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum: type byte plus a 3-byte header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		/* Monitor sockets cannot transmit */
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte is the packet type; strip it from the payload */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Unprivileged senders may only issue whitelisted commands */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Raw-mode devices and vendor commands (OGF 0x3f) bypass
		 * the command queue and its flow control.
		 */
		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Injecting ACL/SCO data requires CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
892
/* setsockopt handler (raw channel only): toggle direction/timestamp
 * ancillary data or install a receive filter.  For HCI_FILTER a partial
 * userspace struct is merged over the current filter, and senders
 * without CAP_NET_RAW are clamped to the security filter.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			/* Pre-fill with the current filter so a short copy
			 * from userspace leaves the remaining fields intact.
			 */
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged sockets cannot widen beyond the security
		 * filter's whitelist.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
974
/* getsockopt handler (raw channel only): report the cmsg flags or the
 * currently installed receive filter.  The filter struct is zeroed
 * before filling so no kernel stack bytes leak through short reads.
 */
static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			/* Zero first: avoids leaking padding to userspace */
			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1039
/* Socket operations for PF_BLUETOOTH/BTPROTO_HCI sockets; connection-
 * oriented operations are stubbed with the sock_no_* helpers.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1059
/* Protocol descriptor; obj_size reserves room for the hci_pinfo state
 * embedded in each socket.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1065
hci_sock_create(struct net * net,struct socket * sock,int protocol,int kern)1066 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1067 int kern)
1068 {
1069 struct sock *sk;
1070
1071 BT_DBG("sock %p", sock);
1072
1073 if (sock->type != SOCK_RAW)
1074 return -ESOCKTNOSUPPORT;
1075
1076 sock->ops = &hci_sock_ops;
1077
1078 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1079 if (!sk)
1080 return -ENOMEM;
1081
1082 sock_init_data(sock, sk);
1083
1084 sock_reset_flag(sk, SOCK_ZAPPED);
1085
1086 sk->sk_protocol = protocol;
1087
1088 sock->state = SS_UNCONNECTED;
1089 sk->sk_state = BT_OPEN;
1090
1091 bt_sock_link(&hci_sk_list, sk);
1092 return 0;
1093 }
1094
/* Family registration hook for BTPROTO_HCI (see hci_sock_init()). */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1100
hci_sock_init(void)1101 int __init hci_sock_init(void)
1102 {
1103 int err;
1104
1105 err = proto_register(&hci_sk_proto, 0);
1106 if (err < 0)
1107 return err;
1108
1109 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1110 if (err < 0)
1111 goto error;
1112
1113 BT_INFO("HCI socket layer initialized");
1114
1115 return 0;
1116
1117 error:
1118 BT_ERR("HCI socket registration failed");
1119 proto_unregister(&hci_sk_proto);
1120 return err;
1121 }
1122
hci_sock_cleanup(void)1123 void hci_sock_cleanup(void)
1124 {
1125 if (bt_sock_unregister(BTPROTO_HCI) < 0)
1126 BT_ERR("HCI socket unregistration failed");
1127
1128 proto_unregister(&hci_sk_proto);
1129 }
1130