1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37 
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42 
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
45 #include "smp.h"
46 #include "leds.h"
47 #include "msft.h"
48 #include "aosp.h"
49 #include "hci_codec.h"
50 
51 static void hci_rx_work(struct work_struct *work);
52 static void hci_cmd_work(struct work_struct *work);
53 static void hci_tx_work(struct work_struct *work);
54 
55 /* HCI device list */
56 LIST_HEAD(hci_dev_list);
57 DEFINE_RWLOCK(hci_dev_list_lock);
58 
59 /* HCI callback list */
60 LIST_HEAD(hci_cb_list);
61 DEFINE_MUTEX(hci_cb_list_lock);
62 
63 /* HCI ID Numbering */
64 static DEFINE_IDA(hci_index_ida);
65 
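/* Small request builders used by the legacy ioctl paths below: each one
 * appends a single HCI command to the request being built and is run
 * synchronously via hci_req_sync().
 */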
66 static int hci_scan_req(struct hci_request *req, unsigned long opt)
67 {
68 	__u8 scan = opt;
69 
70 	BT_DBG("%s %x", req->hdev->name, scan);
71 
72 	/* Inquiry and Page scans */
73 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
74 	return 0;
75 }
76 
77 static int hci_auth_req(struct hci_request *req, unsigned long opt)
78 {
79 	__u8 auth = opt;
80 
81 	BT_DBG("%s %x", req->hdev->name, auth);
82 
83 	/* Authentication */
84 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
85 	return 0;
86 }
87 
88 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
89 {
90 	__u8 encrypt = opt;
91 
92 	BT_DBG("%s %x", req->hdev->name, encrypt);
93 
94 	/* Encryption */
95 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
96 	return 0;
97 }
98 
99 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
100 {
101 	__le16 policy = cpu_to_le16(opt);
102 
103 	BT_DBG("%s %x", req->hdev->name, policy);
104 
105 	/* Default link policy */
106 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
107 	return 0;
108 }
109 
110 /* Get HCI device by index.
111  * Device is held on return. */
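/* A minimal usage sketch: every successful hci_dev_get() must be balanced
 * with hci_dev_put() once the caller is done with the device:
 *
 *	struct hci_dev *hdev = hci_dev_get(index);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */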
112 struct hci_dev *hci_dev_get(int index)
113 {
114 	struct hci_dev *hdev = NULL, *d;
115 
116 	BT_DBG("%d", index);
117 
118 	if (index < 0)
119 		return NULL;
120 
121 	read_lock(&hci_dev_list_lock);
122 	list_for_each_entry(d, &hci_dev_list, list) {
123 		if (d->id == index) {
124 			hdev = hci_dev_hold(d);
125 			break;
126 		}
127 	}
128 	read_unlock(&hci_dev_list_lock);
129 	return hdev;
130 }
131 
132 /* ---- Inquiry support ---- */
133 
134 bool hci_discovery_active(struct hci_dev *hdev)
135 {
136 	struct discovery_state *discov = &hdev->discovery;
137 
138 	switch (discov->state) {
139 	case DISCOVERY_FINDING:
140 	case DISCOVERY_RESOLVING:
141 		return true;
142 
143 	default:
144 		return false;
145 	}
146 }
147 
148 void hci_discovery_set_state(struct hci_dev *hdev, int state)
149 {
150 	int old_state = hdev->discovery.state;
151 
152 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
153 
154 	if (old_state == state)
155 		return;
156 
157 	hdev->discovery.state = state;
158 
159 	switch (state) {
160 	case DISCOVERY_STOPPED:
161 		hci_update_passive_scan(hdev);
162 
163 		if (old_state != DISCOVERY_STARTING)
164 			mgmt_discovering(hdev, 0);
165 		break;
166 	case DISCOVERY_STARTING:
167 		break;
168 	case DISCOVERY_FINDING:
169 		mgmt_discovering(hdev, 1);
170 		break;
171 	case DISCOVERY_RESOLVING:
172 		break;
173 	case DISCOVERY_STOPPING:
174 		break;
175 	}
176 }
177 
178 void hci_inquiry_cache_flush(struct hci_dev *hdev)
179 {
180 	struct discovery_state *cache = &hdev->discovery;
181 	struct inquiry_entry *p, *n;
182 
183 	list_for_each_entry_safe(p, n, &cache->all, all) {
184 		list_del(&p->all);
185 		kfree(p);
186 	}
187 
188 	INIT_LIST_HEAD(&cache->unknown);
189 	INIT_LIST_HEAD(&cache->resolve);
190 }
191 
192 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
193 					       bdaddr_t *bdaddr)
194 {
195 	struct discovery_state *cache = &hdev->discovery;
196 	struct inquiry_entry *e;
197 
198 	BT_DBG("cache %p, %pMR", cache, bdaddr);
199 
200 	list_for_each_entry(e, &cache->all, all) {
201 		if (!bacmp(&e->data.bdaddr, bdaddr))
202 			return e;
203 	}
204 
205 	return NULL;
206 }
207 
208 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
209 						       bdaddr_t *bdaddr)
210 {
211 	struct discovery_state *cache = &hdev->discovery;
212 	struct inquiry_entry *e;
213 
214 	BT_DBG("cache %p, %pMR", cache, bdaddr);
215 
216 	list_for_each_entry(e, &cache->unknown, list) {
217 		if (!bacmp(&e->data.bdaddr, bdaddr))
218 			return e;
219 	}
220 
221 	return NULL;
222 }
223 
224 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
225 						       bdaddr_t *bdaddr,
226 						       int state)
227 {
228 	struct discovery_state *cache = &hdev->discovery;
229 	struct inquiry_entry *e;
230 
231 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
232 
233 	list_for_each_entry(e, &cache->resolve, list) {
234 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
235 			return e;
236 		if (!bacmp(&e->data.bdaddr, bdaddr))
237 			return e;
238 	}
239 
240 	return NULL;
241 }
242 
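/* Re-insert @ie into the resolve list so that entries stay ordered by
 * signal strength (smallest |RSSI| first); entries whose name request is
 * already pending are kept at the head of the list.
 */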
243 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
244 				      struct inquiry_entry *ie)
245 {
246 	struct discovery_state *cache = &hdev->discovery;
247 	struct list_head *pos = &cache->resolve;
248 	struct inquiry_entry *p;
249 
250 	list_del(&ie->list);
251 
252 	list_for_each_entry(p, &cache->resolve, list) {
253 		if (p->name_state != NAME_PENDING &&
254 		    abs(p->data.rssi) >= abs(ie->data.rssi))
255 			break;
256 		pos = &p->list;
257 	}
258 
259 	list_add(&ie->list, pos);
260 }
261 
262 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
263 			     bool name_known)
264 {
265 	struct discovery_state *cache = &hdev->discovery;
266 	struct inquiry_entry *ie;
267 	u32 flags = 0;
268 
269 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
270 
271 	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
272 
273 	if (!data->ssp_mode)
274 		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
275 
276 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
277 	if (ie) {
278 		if (!ie->data.ssp_mode)
279 			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
280 
281 		if (ie->name_state == NAME_NEEDED &&
282 		    data->rssi != ie->data.rssi) {
283 			ie->data.rssi = data->rssi;
284 			hci_inquiry_cache_update_resolve(hdev, ie);
285 		}
286 
287 		goto update;
288 	}
289 
290 	/* Entry not in the cache. Add new one. */
291 	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
292 	if (!ie) {
293 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
294 		goto done;
295 	}
296 
297 	list_add(&ie->all, &cache->all);
298 
299 	if (name_known) {
300 		ie->name_state = NAME_KNOWN;
301 	} else {
302 		ie->name_state = NAME_NOT_KNOWN;
303 		list_add(&ie->list, &cache->unknown);
304 	}
305 
306 update:
307 	if (name_known && ie->name_state != NAME_KNOWN &&
308 	    ie->name_state != NAME_PENDING) {
309 		ie->name_state = NAME_KNOWN;
310 		list_del(&ie->list);
311 	}
312 
313 	memcpy(&ie->data, data, sizeof(*data));
314 	ie->timestamp = jiffies;
315 	cache->timestamp = jiffies;
316 
317 	if (ie->name_state == NAME_NOT_KNOWN)
318 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
319 
320 done:
321 	return flags;
322 }
323 
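/* Copy up to @num cached entries into @buf as struct inquiry_info records
 * and return how many were copied. Runs under hdev->lock and must not
 * sleep (see hci_inquiry() below).
 */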
324 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
325 {
326 	struct discovery_state *cache = &hdev->discovery;
327 	struct inquiry_info *info = (struct inquiry_info *) buf;
328 	struct inquiry_entry *e;
329 	int copied = 0;
330 
331 	list_for_each_entry(e, &cache->all, all) {
332 		struct inquiry_data *data = &e->data;
333 
334 		if (copied >= num)
335 			break;
336 
337 		bacpy(&info->bdaddr, &data->bdaddr);
338 		info->pscan_rep_mode	= data->pscan_rep_mode;
339 		info->pscan_period_mode	= data->pscan_period_mode;
340 		info->pscan_mode	= data->pscan_mode;
341 		memcpy(info->dev_class, data->dev_class, 3);
342 		info->clock_offset	= data->clock_offset;
343 
344 		info++;
345 		copied++;
346 	}
347 
348 	BT_DBG("cache %p, copied %d", cache, copied);
349 	return copied;
350 }
351 
352 static int hci_inq_req(struct hci_request *req, unsigned long opt)
353 {
354 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
355 	struct hci_dev *hdev = req->hdev;
356 	struct hci_cp_inquiry cp;
357 
358 	BT_DBG("%s", hdev->name);
359 
360 	if (test_bit(HCI_INQUIRY, &hdev->flags))
361 		return 0;
362 
363 	/* Start Inquiry */
364 	memcpy(&cp.lap, &ir->lap, 3);
365 	cp.length  = ir->length;
366 	cp.num_rsp = ir->num_rsp;
367 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
368 
369 	return 0;
370 }
371 
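/* HCIINQUIRY ioctl handler: flush a stale inquiry cache, run the inquiry
 * synchronously if needed, wait for the HCI_INQUIRY flag to clear and then
 * copy the cached results back to user space.
 */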
372 int hci_inquiry(void __user *arg)
373 {
374 	__u8 __user *ptr = arg;
375 	struct hci_inquiry_req ir;
376 	struct hci_dev *hdev;
377 	int err = 0, do_inquiry = 0, max_rsp;
378 	long timeo;
379 	__u8 *buf;
380 
381 	if (copy_from_user(&ir, ptr, sizeof(ir)))
382 		return -EFAULT;
383 
384 	hdev = hci_dev_get(ir.dev_id);
385 	if (!hdev)
386 		return -ENODEV;
387 
388 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
389 		err = -EBUSY;
390 		goto done;
391 	}
392 
393 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
394 		err = -EOPNOTSUPP;
395 		goto done;
396 	}
397 
398 	if (hdev->dev_type != HCI_PRIMARY) {
399 		err = -EOPNOTSUPP;
400 		goto done;
401 	}
402 
403 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
404 		err = -EOPNOTSUPP;
405 		goto done;
406 	}
407 
408 	/* Restrict maximum inquiry length to 60 seconds */
409 	if (ir.length > 60) {
410 		err = -EINVAL;
411 		goto done;
412 	}
413 
414 	hci_dev_lock(hdev);
415 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
416 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
417 		hci_inquiry_cache_flush(hdev);
418 		do_inquiry = 1;
419 	}
420 	hci_dev_unlock(hdev);
421 
422 	timeo = ir.length * msecs_to_jiffies(2000);
423 
424 	if (do_inquiry) {
425 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
426 				   timeo, NULL);
427 		if (err < 0)
428 			goto done;
429 
430 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
431 		 * cleared). If it is interrupted by a signal, return -EINTR.
432 		 */
433 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
434 				TASK_INTERRUPTIBLE)) {
435 			err = -EINTR;
436 			goto done;
437 		}
438 	}
439 
440 	/* For an unlimited number of responses we use a buffer with
441 	 * 255 entries.
442 	 */
443 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
444 
445 	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
446 	 * copy it to the user space.
447 	 */
448 	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
449 	if (!buf) {
450 		err = -ENOMEM;
451 		goto done;
452 	}
453 
454 	hci_dev_lock(hdev);
455 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
456 	hci_dev_unlock(hdev);
457 
458 	BT_DBG("num_rsp %d", ir.num_rsp);
459 
460 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
461 		ptr += sizeof(ir);
462 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
463 				 ir.num_rsp))
464 			err = -EFAULT;
465 	} else
466 		err = -EFAULT;
467 
468 	kfree(buf);
469 
470 done:
471 	hci_dev_put(hdev);
472 	return err;
473 }
474 
475 static int hci_dev_do_open(struct hci_dev *hdev)
476 {
477 	int ret = 0;
478 
479 	BT_DBG("%s %p", hdev->name, hdev);
480 
481 	hci_req_sync_lock(hdev);
482 
483 	ret = hci_dev_open_sync(hdev);
484 
485 	hci_req_sync_unlock(hdev);
486 	return ret;
487 }
488 
489 /* ---- HCI ioctl helpers ---- */
490 
491 int hci_dev_open(__u16 dev)
492 {
493 	struct hci_dev *hdev;
494 	int err;
495 
496 	hdev = hci_dev_get(dev);
497 	if (!hdev)
498 		return -ENODEV;
499 
500 	/* Devices that are marked as unconfigured can only be powered
501 	 * up as user channel. Trying to bring them up as normal devices
502 	 * will result in a failure. Only user channel operation is
503 	 * possible.
504 	 *
505 	 * When this function is called for a user channel, the flag
506 	 * HCI_USER_CHANNEL will be set first before attempting to
507 	 * open the device.
508 	 */
509 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
510 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
511 		err = -EOPNOTSUPP;
512 		goto done;
513 	}
514 
515 	/* We need to ensure that no other power on/off work is pending
516 	 * before proceeding to call hci_dev_do_open. This is
517 	 * particularly important if the setup procedure has not yet
518 	 * completed.
519 	 */
520 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
521 		cancel_delayed_work(&hdev->power_off);
522 
523 	/* After this call it is guaranteed that the setup procedure
524 	 * has finished. This means that error conditions like RFKILL
525 	 * or no valid public or static random address apply.
526 	 */
527 	flush_workqueue(hdev->req_workqueue);
528 
529 	/* For controllers not using the management interface and that
530 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
531 	 * so that pairing works for them. Once the management interface
532 	 * is in use this bit will be cleared again and userspace has
533 	 * to explicitly enable it.
534 	 */
535 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
536 	    !hci_dev_test_flag(hdev, HCI_MGMT))
537 		hci_dev_set_flag(hdev, HCI_BONDABLE);
538 
539 	err = hci_dev_do_open(hdev);
540 
541 done:
542 	hci_dev_put(hdev);
543 	return err;
544 }
545 
546 int hci_dev_do_close(struct hci_dev *hdev)
547 {
548 	int err;
549 
550 	BT_DBG("%s %p", hdev->name, hdev);
551 
552 	hci_req_sync_lock(hdev);
553 
554 	err = hci_dev_close_sync(hdev);
555 
556 	hci_req_sync_unlock(hdev);
557 
558 	return err;
559 }
560 
561 int hci_dev_close(__u16 dev)
562 {
563 	struct hci_dev *hdev;
564 	int err;
565 
566 	hdev = hci_dev_get(dev);
567 	if (!hdev)
568 		return -ENODEV;
569 
570 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
571 		err = -EBUSY;
572 		goto done;
573 	}
574 
575 	cancel_work_sync(&hdev->power_on);
576 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
577 		cancel_delayed_work(&hdev->power_off);
578 
579 	err = hci_dev_do_close(hdev);
580 
581 done:
582 	hci_dev_put(hdev);
583 	return err;
584 }
585 
586 static int hci_dev_do_reset(struct hci_dev *hdev)
587 {
588 	int ret;
589 
590 	BT_DBG("%s %p", hdev->name, hdev);
591 
592 	hci_req_sync_lock(hdev);
593 
594 	/* Drop queues */
595 	skb_queue_purge(&hdev->rx_q);
596 	skb_queue_purge(&hdev->cmd_q);
597 
598 	/* Cancel these to avoid queueing non-chained pending work */
599 	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
600 	/* Wait for
601 	 *
602 	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
603 	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
604 	 *
605 	 * inside RCU section to see the flag or complete scheduling.
606 	 */
607 	synchronize_rcu();
608 	/* Explicitly cancel works in case scheduled after setting the flag. */
609 	cancel_delayed_work(&hdev->cmd_timer);
610 	cancel_delayed_work(&hdev->ncmd_timer);
611 
612 	/* Avoid potential lockdep warnings from the *_flush() calls by
613 	 * ensuring the workqueue is empty up front.
614 	 */
615 	drain_workqueue(hdev->workqueue);
616 
617 	hci_dev_lock(hdev);
618 	hci_inquiry_cache_flush(hdev);
619 	hci_conn_hash_flush(hdev);
620 	hci_dev_unlock(hdev);
621 
622 	if (hdev->flush)
623 		hdev->flush(hdev);
624 
625 	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
626 
627 	atomic_set(&hdev->cmd_cnt, 1);
628 	hdev->acl_cnt = 0;
629 	hdev->sco_cnt = 0;
630 	hdev->le_cnt = 0;
631 	hdev->iso_cnt = 0;
632 
633 	ret = hci_reset_sync(hdev);
634 
635 	hci_req_sync_unlock(hdev);
636 	return ret;
637 }
638 
639 int hci_dev_reset(__u16 dev)
640 {
641 	struct hci_dev *hdev;
642 	int err;
643 
644 	hdev = hci_dev_get(dev);
645 	if (!hdev)
646 		return -ENODEV;
647 
648 	if (!test_bit(HCI_UP, &hdev->flags)) {
649 		err = -ENETDOWN;
650 		goto done;
651 	}
652 
653 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
654 		err = -EBUSY;
655 		goto done;
656 	}
657 
658 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
659 		err = -EOPNOTSUPP;
660 		goto done;
661 	}
662 
663 	err = hci_dev_do_reset(hdev);
664 
665 done:
666 	hci_dev_put(hdev);
667 	return err;
668 }
669 
670 int hci_dev_reset_stat(__u16 dev)
671 {
672 	struct hci_dev *hdev;
673 	int ret = 0;
674 
675 	hdev = hci_dev_get(dev);
676 	if (!hdev)
677 		return -ENODEV;
678 
679 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
680 		ret = -EBUSY;
681 		goto done;
682 	}
683 
684 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
685 		ret = -EOPNOTSUPP;
686 		goto done;
687 	}
688 
689 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
690 
691 done:
692 	hci_dev_put(hdev);
693 	return ret;
694 }
695 
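/* Mirror a scan-enable change made through the legacy HCISETSCAN ioctl into
 * the HCI_CONNECTABLE/HCI_DISCOVERABLE flags so that the mgmt interface
 * stays consistent, and emit New Settings when something actually changed.
 */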
696 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
697 {
698 	bool conn_changed, discov_changed;
699 
700 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
701 
702 	if ((scan & SCAN_PAGE))
703 		conn_changed = !hci_dev_test_and_set_flag(hdev,
704 							  HCI_CONNECTABLE);
705 	else
706 		conn_changed = hci_dev_test_and_clear_flag(hdev,
707 							   HCI_CONNECTABLE);
708 
709 	if ((scan & SCAN_INQUIRY)) {
710 		discov_changed = !hci_dev_test_and_set_flag(hdev,
711 							    HCI_DISCOVERABLE);
712 	} else {
713 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
714 		discov_changed = hci_dev_test_and_clear_flag(hdev,
715 							     HCI_DISCOVERABLE);
716 	}
717 
718 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
719 		return;
720 
721 	if (conn_changed || discov_changed) {
722 		/* In case this was disabled through mgmt */
723 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
724 
725 		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
726 			hci_update_adv_data(hdev, hdev->cur_adv_instance);
727 
728 		mgmt_new_settings(hdev);
729 	}
730 }
731 
732 int hci_dev_cmd(unsigned int cmd, void __user *arg)
733 {
734 	struct hci_dev *hdev;
735 	struct hci_dev_req dr;
736 	int err = 0;
737 
738 	if (copy_from_user(&dr, arg, sizeof(dr)))
739 		return -EFAULT;
740 
741 	hdev = hci_dev_get(dr.dev_id);
742 	if (!hdev)
743 		return -ENODEV;
744 
745 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
746 		err = -EBUSY;
747 		goto done;
748 	}
749 
750 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
751 		err = -EOPNOTSUPP;
752 		goto done;
753 	}
754 
755 	if (hdev->dev_type != HCI_PRIMARY) {
756 		err = -EOPNOTSUPP;
757 		goto done;
758 	}
759 
760 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
761 		err = -EOPNOTSUPP;
762 		goto done;
763 	}
764 
765 	switch (cmd) {
766 	case HCISETAUTH:
767 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
768 				   HCI_INIT_TIMEOUT, NULL);
769 		break;
770 
771 	case HCISETENCRYPT:
772 		if (!lmp_encrypt_capable(hdev)) {
773 			err = -EOPNOTSUPP;
774 			break;
775 		}
776 
777 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
778 			/* Auth must be enabled first */
779 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
780 					   HCI_INIT_TIMEOUT, NULL);
781 			if (err)
782 				break;
783 		}
784 
785 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
786 				   HCI_INIT_TIMEOUT, NULL);
787 		break;
788 
789 	case HCISETSCAN:
790 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
791 				   HCI_INIT_TIMEOUT, NULL);
792 
793 		/* Ensure that the connectable and discoverable states
794 		 * get correctly modified as this was a non-mgmt change.
795 		 */
796 		if (!err)
797 			hci_update_passive_scan_state(hdev, dr.dev_opt);
798 		break;
799 
800 	case HCISETLINKPOL:
801 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
802 				   HCI_INIT_TIMEOUT, NULL);
803 		break;
804 
805 	case HCISETLINKMODE:
806 		hdev->link_mode = ((__u16) dr.dev_opt) &
807 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
808 		break;
809 
810 	case HCISETPTYPE:
811 		if (hdev->pkt_type == (__u16) dr.dev_opt)
812 			break;
813 
814 		hdev->pkt_type = (__u16) dr.dev_opt;
815 		mgmt_phy_configuration_changed(hdev, NULL);
816 		break;
817 
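	/* For HCISETACLMTU/HCISETSCOMTU, dev_opt packs both values: the
	 * packet count in the first 16-bit half-word and the MTU in the
	 * second, as laid out in host memory below.
	 */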
818 	case HCISETACLMTU:
819 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
820 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
821 		break;
822 
823 	case HCISETSCOMTU:
824 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
825 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
826 		break;
827 
828 	default:
829 		err = -EINVAL;
830 		break;
831 	}
832 
833 done:
834 	hci_dev_put(hdev);
835 	return err;
836 }
837 
838 int hci_get_dev_list(void __user *arg)
839 {
840 	struct hci_dev *hdev;
841 	struct hci_dev_list_req *dl;
842 	struct hci_dev_req *dr;
843 	int n = 0, size, err;
844 	__u16 dev_num;
845 
846 	if (get_user(dev_num, (__u16 __user *) arg))
847 		return -EFAULT;
848 
849 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
850 		return -EINVAL;
851 
852 	size = sizeof(*dl) + dev_num * sizeof(*dr);
853 
854 	dl = kzalloc(size, GFP_KERNEL);
855 	if (!dl)
856 		return -ENOMEM;
857 
858 	dr = dl->dev_req;
859 
860 	read_lock(&hci_dev_list_lock);
861 	list_for_each_entry(hdev, &hci_dev_list, list) {
862 		unsigned long flags = hdev->flags;
863 
864 		/* When auto-off is configured the transport is actually
865 		 * running, but still report the device as down in that
866 		 * case.
867 		 */
868 		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
869 			flags &= ~BIT(HCI_UP);
870 
871 		(dr + n)->dev_id  = hdev->id;
872 		(dr + n)->dev_opt = flags;
873 
874 		if (++n >= dev_num)
875 			break;
876 	}
877 	read_unlock(&hci_dev_list_lock);
878 
879 	dl->dev_num = n;
880 	size = sizeof(*dl) + n * sizeof(*dr);
881 
882 	err = copy_to_user(arg, dl, size);
883 	kfree(dl);
884 
885 	return err ? -EFAULT : 0;
886 }
887 
888 int hci_get_dev_info(void __user *arg)
889 {
890 	struct hci_dev *hdev;
891 	struct hci_dev_info di;
892 	unsigned long flags;
893 	int err = 0;
894 
895 	if (copy_from_user(&di, arg, sizeof(di)))
896 		return -EFAULT;
897 
898 	hdev = hci_dev_get(di.dev_id);
899 	if (!hdev)
900 		return -ENODEV;
901 
902 	/* When auto-off is configured the transport is actually
903 	 * running, but still report the device as down in that
904 	 * case.
905 	 */
906 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
907 		flags = hdev->flags & ~BIT(HCI_UP);
908 	else
909 		flags = hdev->flags;
910 
911 	strcpy(di.name, hdev->name);
912 	di.bdaddr   = hdev->bdaddr;
913 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
914 	di.flags    = flags;
915 	di.pkt_type = hdev->pkt_type;
916 	if (lmp_bredr_capable(hdev)) {
917 		di.acl_mtu  = hdev->acl_mtu;
918 		di.acl_pkts = hdev->acl_pkts;
919 		di.sco_mtu  = hdev->sco_mtu;
920 		di.sco_pkts = hdev->sco_pkts;
921 	} else {
922 		di.acl_mtu  = hdev->le_mtu;
923 		di.acl_pkts = hdev->le_pkts;
924 		di.sco_mtu  = 0;
925 		di.sco_pkts = 0;
926 	}
927 	di.link_policy = hdev->link_policy;
928 	di.link_mode   = hdev->link_mode;
929 
930 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
931 	memcpy(&di.features, &hdev->features, sizeof(di.features));
932 
933 	if (copy_to_user(arg, &di, sizeof(di)))
934 		err = -EFAULT;
935 
936 	hci_dev_put(hdev);
937 
938 	return err;
939 }
940 
941 /* ---- Interface to HCI drivers ---- */
942 
943 static int hci_rfkill_set_block(void *data, bool blocked)
944 {
945 	struct hci_dev *hdev = data;
946 
947 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
948 
949 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
950 		return -EBUSY;
951 
952 	if (blocked) {
953 		hci_dev_set_flag(hdev, HCI_RFKILLED);
954 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
955 		    !hci_dev_test_flag(hdev, HCI_CONFIG))
956 			hci_dev_do_close(hdev);
957 	} else {
958 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
959 	}
960 
961 	return 0;
962 }
963 
964 static const struct rfkill_ops hci_rfkill_ops = {
965 	.set_block = hci_rfkill_set_block,
966 };
967 
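/* Deferred power-on work: brings the device up and, depending on the state
 * accumulated during setup (rfkill, unconfigured, missing public/static
 * address), either closes it again, arms the auto power-off timer or
 * announces the index over the management interface.
 */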
968 static void hci_power_on(struct work_struct *work)
969 {
970 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
971 	int err;
972 
973 	BT_DBG("%s", hdev->name);
974 
975 	if (test_bit(HCI_UP, &hdev->flags) &&
976 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
977 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
978 		cancel_delayed_work(&hdev->power_off);
979 		err = hci_powered_update_sync(hdev);
980 		mgmt_power_on(hdev, err);
981 		return;
982 	}
983 
984 	err = hci_dev_do_open(hdev);
985 	if (err < 0) {
986 		hci_dev_lock(hdev);
987 		mgmt_set_powered_failed(hdev, err);
988 		hci_dev_unlock(hdev);
989 		return;
990 	}
991 
992 	/* During the HCI setup phase, a few error conditions are
993 	 * ignored and they need to be checked now. If they are still
994 	 * valid, it is important to turn the device back off.
995 	 */
996 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
997 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
998 	    (hdev->dev_type == HCI_PRIMARY &&
999 	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1000 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
1001 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
1002 		hci_dev_do_close(hdev);
1003 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
1004 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1005 				   HCI_AUTO_OFF_TIMEOUT);
1006 	}
1007 
1008 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
1009 		/* For unconfigured devices, set the HCI_RAW flag
1010 		 * so that userspace can easily identify them.
1011 		 */
1012 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1013 			set_bit(HCI_RAW, &hdev->flags);
1014 
1015 		/* For fully configured devices, this will send
1016 		 * the Index Added event. For unconfigured devices,
1017 		 * it will send an Unconfigured Index Added event.
1018 		 *
1019 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
1020 		 * and no event will be sent.
1021 		 */
1022 		mgmt_index_added(hdev);
1023 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
1024 		/* When the controller is now configured, then it
1025 		 * is important to clear the HCI_RAW flag.
1026 		 */
1027 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1028 			clear_bit(HCI_RAW, &hdev->flags);
1029 
1030 		/* Powering on the controller with HCI_CONFIG set only
1031 		 * happens with the transition from unconfigured to
1032 		 * configured. This will send the Index Added event.
1033 		 */
1034 		mgmt_index_added(hdev);
1035 	}
1036 }
1037 
1038 static void hci_power_off(struct work_struct *work)
1039 {
1040 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1041 					    power_off.work);
1042 
1043 	BT_DBG("%s", hdev->name);
1044 
1045 	hci_dev_do_close(hdev);
1046 }
1047 
1048 static void hci_error_reset(struct work_struct *work)
1049 {
1050 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1051 
1052 	hci_dev_hold(hdev);
1053 	BT_DBG("%s", hdev->name);
1054 
1055 	if (hdev->hw_error)
1056 		hdev->hw_error(hdev, hdev->hw_error_code);
1057 	else
1058 		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1059 
1060 	if (!hci_dev_do_close(hdev))
1061 		hci_dev_do_open(hdev);
1062 
1063 	hci_dev_put(hdev);
1064 }
1065 
1066 void hci_uuids_clear(struct hci_dev *hdev)
1067 {
1068 	struct bt_uuid *uuid, *tmp;
1069 
1070 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1071 		list_del(&uuid->list);
1072 		kfree(uuid);
1073 	}
1074 }
1075 
1076 void hci_link_keys_clear(struct hci_dev *hdev)
1077 {
1078 	struct link_key *key, *tmp;
1079 
1080 	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1081 		list_del_rcu(&key->list);
1082 		kfree_rcu(key, rcu);
1083 	}
1084 }
1085 
1086 void hci_smp_ltks_clear(struct hci_dev *hdev)
1087 {
1088 	struct smp_ltk *k, *tmp;
1089 
1090 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1091 		list_del_rcu(&k->list);
1092 		kfree_rcu(k, rcu);
1093 	}
1094 }
1095 
1096 void hci_smp_irks_clear(struct hci_dev *hdev)
1097 {
1098 	struct smp_irk *k, *tmp;
1099 
1100 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1101 		list_del_rcu(&k->list);
1102 		kfree_rcu(k, rcu);
1103 	}
1104 }
1105 
1106 void hci_blocked_keys_clear(struct hci_dev *hdev)
1107 {
1108 	struct blocked_key *b, *tmp;
1109 
1110 	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1111 		list_del_rcu(&b->list);
1112 		kfree_rcu(b, rcu);
1113 	}
1114 }
1115 
1116 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1117 {
1118 	bool blocked = false;
1119 	struct blocked_key *b;
1120 
1121 	rcu_read_lock();
1122 	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1123 		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1124 			blocked = true;
1125 			break;
1126 		}
1127 	}
1128 
1129 	rcu_read_unlock();
1130 	return blocked;
1131 }
1132 
1133 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1134 {
1135 	struct link_key *k;
1136 
1137 	rcu_read_lock();
1138 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1139 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
1140 			rcu_read_unlock();
1141 
1142 			if (hci_is_blocked_key(hdev,
1143 					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
1144 					       k->val)) {
1145 				bt_dev_warn_ratelimited(hdev,
1146 							"Link key blocked for %pMR",
1147 							&k->bdaddr);
1148 				return NULL;
1149 			}
1150 
1151 			return k;
1152 		}
1153 	}
1154 	rcu_read_unlock();
1155 
1156 	return NULL;
1157 }
1158 
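/* Decide whether a newly created BR/EDR link key should be stored
 * persistently: debug keys and changed-combination keys without a previous
 * key are never stored; otherwise the decision follows the bonding
 * requirements negotiated by the local and remote side.
 */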
1159 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1160 			       u8 key_type, u8 old_key_type)
1161 {
1162 	/* Legacy key */
1163 	if (key_type < 0x03)
1164 		return true;
1165 
1166 	/* Debug keys are insecure so don't store them persistently */
1167 	if (key_type == HCI_LK_DEBUG_COMBINATION)
1168 		return false;
1169 
1170 	/* Changed combination key and there's no previous one */
1171 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1172 		return false;
1173 
1174 	/* Security mode 3 case */
1175 	if (!conn)
1176 		return true;
1177 
1178 	/* BR/EDR key derived using SC from an LE link */
1179 	if (conn->type == LE_LINK)
1180 		return true;
1181 
1182 	/* Neither local nor remote side had no-bonding as requirement */
1183 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1184 		return true;
1185 
1186 	/* Local side had dedicated bonding as requirement */
1187 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1188 		return true;
1189 
1190 	/* Remote side had dedicated bonding as requirement */
1191 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1192 		return true;
1193 
1194 	/* If none of the above criteria match, then don't store the key
1195 	 * persistently */
1196 	return false;
1197 }
1198 
1199 static u8 ltk_role(u8 type)
1200 {
1201 	if (type == SMP_LTK)
1202 		return HCI_ROLE_MASTER;
1203 
1204 	return HCI_ROLE_SLAVE;
1205 }
1206 
1207 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1208 			     u8 addr_type, u8 role)
1209 {
1210 	struct smp_ltk *k;
1211 
1212 	rcu_read_lock();
1213 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1214 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1215 			continue;
1216 
1217 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1218 			rcu_read_unlock();
1219 
1220 			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1221 					       k->val)) {
1222 				bt_dev_warn_ratelimited(hdev,
1223 							"LTK blocked for %pMR",
1224 							&k->bdaddr);
1225 				return NULL;
1226 			}
1227 
1228 			return k;
1229 		}
1230 	}
1231 	rcu_read_unlock();
1232 
1233 	return NULL;
1234 }
1235 
1236 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1237 {
1238 	struct smp_irk *irk_to_return = NULL;
1239 	struct smp_irk *irk;
1240 
1241 	rcu_read_lock();
1242 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1243 		if (!bacmp(&irk->rpa, rpa)) {
1244 			irk_to_return = irk;
1245 			goto done;
1246 		}
1247 	}
1248 
1249 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1250 		if (smp_irk_matches(hdev, irk->val, rpa)) {
1251 			bacpy(&irk->rpa, rpa);
1252 			irk_to_return = irk;
1253 			goto done;
1254 		}
1255 	}
1256 
1257 done:
1258 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1259 						irk_to_return->val)) {
1260 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1261 					&irk_to_return->bdaddr);
1262 		irk_to_return = NULL;
1263 	}
1264 
1265 	rcu_read_unlock();
1266 
1267 	return irk_to_return;
1268 }
1269 
1270 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1271 				     u8 addr_type)
1272 {
1273 	struct smp_irk *irk_to_return = NULL;
1274 	struct smp_irk *irk;
1275 
1276 	/* Identity Address must be public or static random */
1277 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1278 		return NULL;
1279 
1280 	rcu_read_lock();
1281 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1282 		if (addr_type == irk->addr_type &&
1283 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
1284 			irk_to_return = irk;
1285 			goto done;
1286 		}
1287 	}
1288 
1289 done:
1290 
1291 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1292 						irk_to_return->val)) {
1293 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1294 					&irk_to_return->bdaddr);
1295 		irk_to_return = NULL;
1296 	}
1297 
1298 	rcu_read_unlock();
1299 
1300 	return irk_to_return;
1301 }
1302 
1303 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1304 				  bdaddr_t *bdaddr, u8 *val, u8 type,
1305 				  u8 pin_len, bool *persistent)
1306 {
1307 	struct link_key *key, *old_key;
1308 	u8 old_key_type;
1309 
1310 	old_key = hci_find_link_key(hdev, bdaddr);
1311 	if (old_key) {
1312 		old_key_type = old_key->type;
1313 		key = old_key;
1314 	} else {
1315 		old_key_type = conn ? conn->key_type : 0xff;
1316 		key = kzalloc(sizeof(*key), GFP_KERNEL);
1317 		if (!key)
1318 			return NULL;
1319 		list_add_rcu(&key->list, &hdev->link_keys);
1320 	}
1321 
1322 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1323 
1324 	/* Some buggy controller combinations generate a changed
1325 	 * combination key for legacy pairing even when there's no
1326 	 * previous key */
1327 	if (type == HCI_LK_CHANGED_COMBINATION &&
1328 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1329 		type = HCI_LK_COMBINATION;
1330 		if (conn)
1331 			conn->key_type = type;
1332 	}
1333 
1334 	bacpy(&key->bdaddr, bdaddr);
1335 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1336 	key->pin_len = pin_len;
1337 
1338 	if (type == HCI_LK_CHANGED_COMBINATION)
1339 		key->type = old_key_type;
1340 	else
1341 		key->type = type;
1342 
1343 	if (persistent)
1344 		*persistent = hci_persistent_key(hdev, conn, type,
1345 						 old_key_type);
1346 
1347 	return key;
1348 }
1349 
1350 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1351 			    u8 addr_type, u8 type, u8 authenticated,
1352 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1353 {
1354 	struct smp_ltk *key, *old_key;
1355 	u8 role = ltk_role(type);
1356 
1357 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1358 	if (old_key)
1359 		key = old_key;
1360 	else {
1361 		key = kzalloc(sizeof(*key), GFP_KERNEL);
1362 		if (!key)
1363 			return NULL;
1364 		list_add_rcu(&key->list, &hdev->long_term_keys);
1365 	}
1366 
1367 	bacpy(&key->bdaddr, bdaddr);
1368 	key->bdaddr_type = addr_type;
1369 	memcpy(key->val, tk, sizeof(key->val));
1370 	key->authenticated = authenticated;
1371 	key->ediv = ediv;
1372 	key->rand = rand;
1373 	key->enc_size = enc_size;
1374 	key->type = type;
1375 
1376 	return key;
1377 }
1378 
1379 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1380 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
1381 {
1382 	struct smp_irk *irk;
1383 
1384 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1385 	if (!irk) {
1386 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1387 		if (!irk)
1388 			return NULL;
1389 
1390 		bacpy(&irk->bdaddr, bdaddr);
1391 		irk->addr_type = addr_type;
1392 
1393 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1394 	}
1395 
1396 	memcpy(irk->val, val, 16);
1397 	bacpy(&irk->rpa, rpa);
1398 
1399 	return irk;
1400 }
1401 
1402 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1403 {
1404 	struct link_key *key;
1405 
1406 	key = hci_find_link_key(hdev, bdaddr);
1407 	if (!key)
1408 		return -ENOENT;
1409 
1410 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1411 
1412 	list_del_rcu(&key->list);
1413 	kfree_rcu(key, rcu);
1414 
1415 	return 0;
1416 }
1417 
1418 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1419 {
1420 	struct smp_ltk *k, *tmp;
1421 	int removed = 0;
1422 
1423 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1424 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1425 			continue;
1426 
1427 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1428 
1429 		list_del_rcu(&k->list);
1430 		kfree_rcu(k, rcu);
1431 		removed++;
1432 	}
1433 
1434 	return removed ? 0 : -ENOENT;
1435 }
1436 
1437 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1438 {
1439 	struct smp_irk *k, *tmp;
1440 
1441 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1442 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1443 			continue;
1444 
1445 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1446 
1447 		list_del_rcu(&k->list);
1448 		kfree_rcu(k, rcu);
1449 	}
1450 }
1451 
1452 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1453 {
1454 	struct smp_ltk *k;
1455 	struct smp_irk *irk;
1456 	u8 addr_type;
1457 
1458 	if (type == BDADDR_BREDR) {
1459 		if (hci_find_link_key(hdev, bdaddr))
1460 			return true;
1461 		return false;
1462 	}
1463 
1464 	/* Convert to HCI addr type which struct smp_ltk uses */
1465 	if (type == BDADDR_LE_PUBLIC)
1466 		addr_type = ADDR_LE_DEV_PUBLIC;
1467 	else
1468 		addr_type = ADDR_LE_DEV_RANDOM;
1469 
1470 	irk = hci_get_irk(hdev, bdaddr, addr_type);
1471 	if (irk) {
1472 		bdaddr = &irk->bdaddr;
1473 		addr_type = irk->addr_type;
1474 	}
1475 
1476 	rcu_read_lock();
1477 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1478 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1479 			rcu_read_unlock();
1480 			return true;
1481 		}
1482 	}
1483 	rcu_read_unlock();
1484 
1485 	return false;
1486 }
1487 
1488 /* HCI command timer function */
1489 static void hci_cmd_timeout(struct work_struct *work)
1490 {
1491 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1492 					    cmd_timer.work);
1493 
1494 	if (hdev->sent_cmd) {
1495 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1496 		u16 opcode = __le16_to_cpu(sent->opcode);
1497 
1498 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1499 	} else {
1500 		bt_dev_err(hdev, "command tx timeout");
1501 	}
1502 
1503 	if (hdev->cmd_timeout)
1504 		hdev->cmd_timeout(hdev);
1505 
1506 	atomic_set(&hdev->cmd_cnt, 1);
1507 	queue_work(hdev->workqueue, &hdev->cmd_work);
1508 }
1509 
1510 /* HCI ncmd timer function */
1511 static void hci_ncmd_timeout(struct work_struct *work)
1512 {
1513 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1514 					    ncmd_timer.work);
1515 
1516 	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1517 
1518 	/* During HCI_INIT phase no events can be injected if the ncmd timer
1519 	 * triggers since the procedure has its own timeout handling.
1520 	 */
1521 	if (test_bit(HCI_INIT, &hdev->flags))
1522 		return;
1523 
1524 	/* This is an irrecoverable state, inject hardware error event */
1525 	hci_reset_dev(hdev);
1526 }
1527 
1528 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1529 					  bdaddr_t *bdaddr, u8 bdaddr_type)
1530 {
1531 	struct oob_data *data;
1532 
1533 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
1534 		if (bacmp(bdaddr, &data->bdaddr) != 0)
1535 			continue;
1536 		if (data->bdaddr_type != bdaddr_type)
1537 			continue;
1538 		return data;
1539 	}
1540 
1541 	return NULL;
1542 }
1543 
1544 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1545 			       u8 bdaddr_type)
1546 {
1547 	struct oob_data *data;
1548 
1549 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1550 	if (!data)
1551 		return -ENOENT;
1552 
1553 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1554 
1555 	list_del(&data->list);
1556 	kfree(data);
1557 
1558 	return 0;
1559 }
1560 
1561 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1562 {
1563 	struct oob_data *data, *n;
1564 
1565 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1566 		list_del(&data->list);
1567 		kfree(data);
1568 	}
1569 }
1570 
1571 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1572 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
1573 			    u8 *hash256, u8 *rand256)
1574 {
1575 	struct oob_data *data;
1576 
1577 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1578 	if (!data) {
1579 		data = kmalloc(sizeof(*data), GFP_KERNEL);
1580 		if (!data)
1581 			return -ENOMEM;
1582 
1583 		bacpy(&data->bdaddr, bdaddr);
1584 		data->bdaddr_type = bdaddr_type;
1585 		list_add(&data->list, &hdev->remote_oob_data);
1586 	}
1587 
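	/* data->present tracks which OOB blocks below are valid; going by the
	 * hash192/hash256 field names this reads as: 0x01 P-192 data only,
	 * 0x02 P-256 data only, 0x03 both (interpretation, not spelled out
	 * here).
	 */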
1588 	if (hash192 && rand192) {
1589 		memcpy(data->hash192, hash192, sizeof(data->hash192));
1590 		memcpy(data->rand192, rand192, sizeof(data->rand192));
1591 		if (hash256 && rand256)
1592 			data->present = 0x03;
1593 	} else {
1594 		memset(data->hash192, 0, sizeof(data->hash192));
1595 		memset(data->rand192, 0, sizeof(data->rand192));
1596 		if (hash256 && rand256)
1597 			data->present = 0x02;
1598 		else
1599 			data->present = 0x00;
1600 	}
1601 
1602 	if (hash256 && rand256) {
1603 		memcpy(data->hash256, hash256, sizeof(data->hash256));
1604 		memcpy(data->rand256, rand256, sizeof(data->rand256));
1605 	} else {
1606 		memset(data->hash256, 0, sizeof(data->hash256));
1607 		memset(data->rand256, 0, sizeof(data->rand256));
1608 		if (hash192 && rand192)
1609 			data->present = 0x01;
1610 	}
1611 
1612 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
1613 
1614 	return 0;
1615 }
1616 
1617 /* This function requires the caller holds hdev->lock */
1618 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1619 {
1620 	struct adv_info *adv_instance;
1621 
1622 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1623 		if (adv_instance->instance == instance)
1624 			return adv_instance;
1625 	}
1626 
1627 	return NULL;
1628 }
1629 
1630 /* This function requires the caller holds hdev->lock */
1631 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1632 {
1633 	struct adv_info *cur_instance;
1634 
1635 	cur_instance = hci_find_adv_instance(hdev, instance);
1636 	if (!cur_instance)
1637 		return NULL;
1638 
1639 	if (cur_instance == list_last_entry(&hdev->adv_instances,
1640 					    struct adv_info, list))
1641 		return list_first_entry(&hdev->adv_instances,
1642 						 struct adv_info, list);
1643 	else
1644 		return list_next_entry(cur_instance, list);
1645 }
1646 
1647 /* This function requires the caller holds hdev->lock */
1648 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1649 {
1650 	struct adv_info *adv_instance;
1651 
1652 	adv_instance = hci_find_adv_instance(hdev, instance);
1653 	if (!adv_instance)
1654 		return -ENOENT;
1655 
1656 	BT_DBG("%s removing %dMR", hdev->name, instance);
1657 
1658 	if (hdev->cur_adv_instance == instance) {
1659 		if (hdev->adv_instance_timeout) {
1660 			cancel_delayed_work(&hdev->adv_instance_expire);
1661 			hdev->adv_instance_timeout = 0;
1662 		}
1663 		hdev->cur_adv_instance = 0x00;
1664 	}
1665 
1666 	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1667 
1668 	list_del(&adv_instance->list);
1669 	kfree(adv_instance);
1670 
1671 	hdev->adv_instance_cnt--;
1672 
1673 	return 0;
1674 }
1675 
1676 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1677 {
1678 	struct adv_info *adv_instance, *n;
1679 
1680 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1681 		adv_instance->rpa_expired = rpa_expired;
1682 }
1683 
1684 /* This function requires the caller holds hdev->lock */
1685 void hci_adv_instances_clear(struct hci_dev *hdev)
1686 {
1687 	struct adv_info *adv_instance, *n;
1688 
1689 	if (hdev->adv_instance_timeout) {
1690 		cancel_delayed_work(&hdev->adv_instance_expire);
1691 		hdev->adv_instance_timeout = 0;
1692 	}
1693 
1694 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1695 		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1696 		list_del(&adv_instance->list);
1697 		kfree(adv_instance);
1698 	}
1699 
1700 	hdev->adv_instance_cnt = 0;
1701 	hdev->cur_adv_instance = 0x00;
1702 }
1703 
1704 static void adv_instance_rpa_expired(struct work_struct *work)
1705 {
1706 	struct adv_info *adv_instance = container_of(work, struct adv_info,
1707 						     rpa_expired_cb.work);
1708 
1709 	BT_DBG("");
1710 
1711 	adv_instance->rpa_expired = true;
1712 }
1713 
1714 /* This function requires the caller holds hdev->lock */
1715 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1716 				      u32 flags, u16 adv_data_len, u8 *adv_data,
1717 				      u16 scan_rsp_len, u8 *scan_rsp_data,
1718 				      u16 timeout, u16 duration, s8 tx_power,
1719 				      u32 min_interval, u32 max_interval,
1720 				      u8 mesh_handle)
1721 {
1722 	struct adv_info *adv;
1723 
1724 	adv = hci_find_adv_instance(hdev, instance);
1725 	if (adv) {
1726 		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1727 		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1728 		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1729 	} else {
1730 		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1731 		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1732 			return ERR_PTR(-EOVERFLOW);
1733 
1734 		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1735 		if (!adv)
1736 			return ERR_PTR(-ENOMEM);
1737 
1738 		adv->pending = true;
1739 		adv->instance = instance;
1740 		list_add(&adv->list, &hdev->adv_instances);
1741 		hdev->adv_instance_cnt++;
1742 	}
1743 
1744 	adv->flags = flags;
1745 	adv->min_interval = min_interval;
1746 	adv->max_interval = max_interval;
1747 	adv->tx_power = tx_power;
1748 	/* Defining a mesh_handle changes the timing units to ms,
1749 	 * rather than seconds, and ties the instance to the requested
1750 	 * mesh_tx queue.
1751 	 */
1752 	adv->mesh = mesh_handle;
1753 
1754 	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1755 				  scan_rsp_len, scan_rsp_data);
1756 
1757 	adv->timeout = timeout;
1758 	adv->remaining_time = timeout;
1759 
1760 	if (duration == 0)
1761 		adv->duration = hdev->def_multi_adv_rotation_duration;
1762 	else
1763 		adv->duration = duration;
1764 
1765 	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1766 
1767 	BT_DBG("%s for %dMR", hdev->name, instance);
1768 
1769 	return adv;
1770 }
1771 
1772 /* This function requires the caller holds hdev->lock */
1773 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1774 				      u32 flags, u8 data_len, u8 *data,
1775 				      u32 min_interval, u32 max_interval)
1776 {
1777 	struct adv_info *adv;
1778 
1779 	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1780 				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1781 				   min_interval, max_interval, 0);
1782 	if (IS_ERR(adv))
1783 		return adv;
1784 
1785 	adv->periodic = true;
1786 	adv->per_adv_data_len = data_len;
1787 
1788 	if (data)
1789 		memcpy(adv->per_adv_data, data, data_len);
1790 
1791 	return adv;
1792 }
1793 
1794 /* This function requires the caller holds hdev->lock */
1795 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1796 			      u16 adv_data_len, u8 *adv_data,
1797 			      u16 scan_rsp_len, u8 *scan_rsp_data)
1798 {
1799 	struct adv_info *adv;
1800 
1801 	adv = hci_find_adv_instance(hdev, instance);
1802 
1803 	/* If advertisement doesn't exist, we can't modify its data */
1804 	if (!adv)
1805 		return -ENOENT;
1806 
1807 	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1808 		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1809 		memcpy(adv->adv_data, adv_data, adv_data_len);
1810 		adv->adv_data_len = adv_data_len;
1811 		adv->adv_data_changed = true;
1812 	}
1813 
1814 	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1815 		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1816 		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1817 		adv->scan_rsp_len = scan_rsp_len;
1818 		adv->scan_rsp_changed = true;
1819 	}
1820 
1821 	/* Mark as changed if there are flags which would affect it */
1822 	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1823 	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1824 		adv->scan_rsp_changed = true;
1825 
1826 	return 0;
1827 }
1828 
1829 /* This function requires the caller holds hdev->lock */
1830 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1831 {
1832 	u32 flags;
1833 	struct adv_info *adv;
1834 
1835 	if (instance == 0x00) {
1836 		/* Instance 0 always manages the "Tx Power" and "Flags"
1837 		 * fields
1838 		 */
1839 		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1840 
1841 		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1842 		 * corresponds to the "connectable" instance flag.
1843 		 */
1844 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1845 			flags |= MGMT_ADV_FLAG_CONNECTABLE;
1846 
1847 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1848 			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1849 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1850 			flags |= MGMT_ADV_FLAG_DISCOV;
1851 
1852 		return flags;
1853 	}
1854 
1855 	adv = hci_find_adv_instance(hdev, instance);
1856 
1857 	/* Return 0 when we got an invalid instance identifier. */
1858 	if (!adv)
1859 		return 0;
1860 
1861 	return adv->flags;
1862 }
1863 
1864 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1865 {
1866 	struct adv_info *adv;
1867 
1868 	/* Instance 0x00 always set local name */
1869 	if (instance == 0x00)
1870 		return true;
1871 
1872 	adv = hci_find_adv_instance(hdev, instance);
1873 	if (!adv)
1874 		return false;
1875 
1876 	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1877 	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1878 		return true;
1879 
1880 	return adv->scan_rsp_len ? true : false;
1881 }
1882 
1883 /* This function requires the caller holds hdev->lock */
1884 void hci_adv_monitors_clear(struct hci_dev *hdev)
1885 {
1886 	struct adv_monitor *monitor;
1887 	int handle;
1888 
1889 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1890 		hci_free_adv_monitor(hdev, monitor);
1891 
1892 	idr_destroy(&hdev->adv_monitors_idr);
1893 }
1894 
1895 /* Frees the monitor structure and does some bookkeeping.
1896  * This function requires the caller holds hdev->lock.
1897  */
1898 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1899 {
1900 	struct adv_pattern *pattern;
1901 	struct adv_pattern *tmp;
1902 
1903 	if (!monitor)
1904 		return;
1905 
1906 	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1907 		list_del(&pattern->list);
1908 		kfree(pattern);
1909 	}
1910 
1911 	if (monitor->handle)
1912 		idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1913 
1914 	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1915 		hdev->adv_monitors_cnt--;
1916 		mgmt_adv_monitor_removed(hdev, monitor->handle);
1917 	}
1918 
1919 	kfree(monitor);
1920 }
1921 
1922 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1923  * also attempts to forward the request to the controller.
1924  * This function requires the caller holds hci_req_sync_lock.
1925  */
1926 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1927 {
1928 	int min, max, handle;
1929 	int status = 0;
1930 
1931 	if (!monitor)
1932 		return -EINVAL;
1933 
1934 	hci_dev_lock(hdev);
1935 
1936 	min = HCI_MIN_ADV_MONITOR_HANDLE;
1937 	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1938 	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1939 			   GFP_KERNEL);
1940 
1941 	hci_dev_unlock(hdev);
1942 
1943 	if (handle < 0)
1944 		return handle;
1945 
1946 	monitor->handle = handle;
1947 
1948 	if (!hdev_is_powered(hdev))
1949 		return status;
1950 
1951 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1952 	case HCI_ADV_MONITOR_EXT_NONE:
1953 		bt_dev_dbg(hdev, "add monitor %d status %d",
1954 			   monitor->handle, status);
1955 		/* Message was not forwarded to controller - not an error */
1956 		break;
1957 
1958 	case HCI_ADV_MONITOR_EXT_MSFT:
1959 		status = msft_add_monitor_pattern(hdev, monitor);
1960 		bt_dev_dbg(hdev, "add monitor %d msft status %d",
1961 			   handle, status);
1962 		break;
1963 	}
1964 
1965 	return status;
1966 }
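
/* Illustrative sketch only (the "pattern" variable below is assumed to be
 * an already-filled struct adv_pattern): a caller typically builds the
 * monitor and its pattern list before handing it over, roughly:
 *
 *	monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
 *	if (!monitor)
 *		return -ENOMEM;
 *	INIT_LIST_HEAD(&monitor->patterns);
 *	list_add(&pattern->list, &monitor->patterns);
 *
 *	hci_req_sync_lock(hdev);
 *	err = hci_add_adv_monitor(hdev, monitor);
 *	hci_req_sync_unlock(hdev);
 */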
1967 
1968 /* Attempts to remove the monitor from the controller and then frees it. If
1969  * the controller has no corresponding handle, the monitor is freed anyway.
1970  * This function requires the caller holds hci_req_sync_lock.
1971  */
1972 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1973 				  struct adv_monitor *monitor)
1974 {
1975 	int status = 0;
1976 	int handle;
1977 
1978 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1979 	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1980 		bt_dev_dbg(hdev, "remove monitor %d status %d",
1981 			   monitor->handle, status);
1982 		goto free_monitor;
1983 
1984 	case HCI_ADV_MONITOR_EXT_MSFT:
1985 		handle = monitor->handle;
1986 		status = msft_remove_monitor(hdev, monitor);
1987 		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1988 			   handle, status);
1989 		break;
1990 	}
1991 
1992 	/* In case no matching handle registered, just free the monitor */
1993 	if (status == -ENOENT)
1994 		goto free_monitor;
1995 
1996 	return status;
1997 
1998 free_monitor:
1999 	if (status == -ENOENT)
2000 		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
2001 			    monitor->handle);
2002 	hci_free_adv_monitor(hdev, monitor);
2003 
2004 	return status;
2005 }
2006 
2007 /* This function requires the caller holds hci_req_sync_lock */
2008 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2009 {
2010 	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2011 
2012 	if (!monitor)
2013 		return -EINVAL;
2014 
2015 	return hci_remove_adv_monitor(hdev, monitor);
2016 }
2017 
2018 /* This function requires the caller holds hci_req_sync_lock */
2019 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2020 {
2021 	struct adv_monitor *monitor;
2022 	int idr_next_id = 0;
2023 	int status = 0;
2024 
2025 	while (1) {
2026 		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2027 		if (!monitor)
2028 			break;
2029 
2030 		status = hci_remove_adv_monitor(hdev, monitor);
2031 		if (status)
2032 			return status;
2033 
2034 		idr_next_id++;
2035 	}
2036 
2037 	return status;
2038 }
2039 
2040 /* This function requires the caller holds hdev->lock */
2041 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2042 {
2043 	return !idr_is_empty(&hdev->adv_monitors_idr);
2044 }
2045 
2046 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2047 {
2048 	if (msft_monitor_supported(hdev))
2049 		return HCI_ADV_MONITOR_EXT_MSFT;
2050 
2051 	return HCI_ADV_MONITOR_EXT_NONE;
2052 }
2053 
2054 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2055 					 bdaddr_t *bdaddr, u8 type)
2056 {
2057 	struct bdaddr_list *b;
2058 
2059 	list_for_each_entry(b, bdaddr_list, list) {
2060 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2061 			return b;
2062 	}
2063 
2064 	return NULL;
2065 }
2066 
2067 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2068 				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2069 				u8 type)
2070 {
2071 	struct bdaddr_list_with_irk *b;
2072 
2073 	list_for_each_entry(b, bdaddr_list, list) {
2074 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2075 			return b;
2076 	}
2077 
2078 	return NULL;
2079 }
2080 
2081 struct bdaddr_list_with_flags *
2082 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2083 				  bdaddr_t *bdaddr, u8 type)
2084 {
2085 	struct bdaddr_list_with_flags *b;
2086 
2087 	list_for_each_entry(b, bdaddr_list, list) {
2088 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2089 			return b;
2090 	}
2091 
2092 	return NULL;
2093 }
2094 
2095 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2096 {
2097 	struct bdaddr_list *b, *n;
2098 
2099 	list_for_each_entry_safe(b, n, bdaddr_list, list) {
2100 		list_del(&b->list);
2101 		kfree(b);
2102 	}
2103 }
2104 
2105 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2106 {
2107 	struct bdaddr_list *entry;
2108 
2109 	if (!bacmp(bdaddr, BDADDR_ANY))
2110 		return -EBADF;
2111 
2112 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2113 		return -EEXIST;
2114 
2115 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2116 	if (!entry)
2117 		return -ENOMEM;
2118 
2119 	bacpy(&entry->bdaddr, bdaddr);
2120 	entry->bdaddr_type = type;
2121 
2122 	list_add(&entry->list, list);
2123 
2124 	return 0;
2125 }
2126 
2127 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2128 					u8 type, u8 *peer_irk, u8 *local_irk)
2129 {
2130 	struct bdaddr_list_with_irk *entry;
2131 
2132 	if (!bacmp(bdaddr, BDADDR_ANY))
2133 		return -EBADF;
2134 
2135 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2136 		return -EEXIST;
2137 
2138 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2139 	if (!entry)
2140 		return -ENOMEM;
2141 
2142 	bacpy(&entry->bdaddr, bdaddr);
2143 	entry->bdaddr_type = type;
2144 
2145 	if (peer_irk)
2146 		memcpy(entry->peer_irk, peer_irk, 16);
2147 
2148 	if (local_irk)
2149 		memcpy(entry->local_irk, local_irk, 16);
2150 
2151 	list_add(&entry->list, list);
2152 
2153 	return 0;
2154 }
2155 
2156 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2157 				   u8 type, u32 flags)
2158 {
2159 	struct bdaddr_list_with_flags *entry;
2160 
2161 	if (!bacmp(bdaddr, BDADDR_ANY))
2162 		return -EBADF;
2163 
2164 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2165 		return -EEXIST;
2166 
2167 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2168 	if (!entry)
2169 		return -ENOMEM;
2170 
2171 	bacpy(&entry->bdaddr, bdaddr);
2172 	entry->bdaddr_type = type;
2173 	entry->flags = flags;
2174 
2175 	list_add(&entry->list, list);
2176 
2177 	return 0;
2178 }
2179 
2180 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2181 {
2182 	struct bdaddr_list *entry;
2183 
2184 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2185 		hci_bdaddr_list_clear(list);
2186 		return 0;
2187 	}
2188 
2189 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2190 	if (!entry)
2191 		return -ENOENT;
2192 
2193 	list_del(&entry->list);
2194 	kfree(entry);
2195 
2196 	return 0;
2197 }
2198 
2199 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2200 							u8 type)
2201 {
2202 	struct bdaddr_list_with_irk *entry;
2203 
2204 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2205 		hci_bdaddr_list_clear(list);
2206 		return 0;
2207 	}
2208 
2209 	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2210 	if (!entry)
2211 		return -ENOENT;
2212 
2213 	list_del(&entry->list);
2214 	kfree(entry);
2215 
2216 	return 0;
2217 }
2218 
2219 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2220 				   u8 type)
2221 {
2222 	struct bdaddr_list_with_flags *entry;
2223 
2224 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2225 		hci_bdaddr_list_clear(list);
2226 		return 0;
2227 	}
2228 
2229 	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2230 	if (!entry)
2231 		return -ENOENT;
2232 
2233 	list_del(&entry->list);
2234 	kfree(entry);
2235 
2236 	return 0;
2237 }
2238 
2239 /* This function requires the caller holds hdev->lock */
2240 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2241 					       bdaddr_t *addr, u8 addr_type)
2242 {
2243 	struct hci_conn_params *params;
2244 
2245 	list_for_each_entry(params, &hdev->le_conn_params, list) {
2246 		if (bacmp(&params->addr, addr) == 0 &&
2247 		    params->addr_type == addr_type) {
2248 			return params;
2249 		}
2250 	}
2251 
2252 	return NULL;
2253 }
2254 
2255 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2256 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2257 						  bdaddr_t *addr, u8 addr_type)
2258 {
2259 	struct hci_conn_params *param;
2260 
2261 	rcu_read_lock();
2262 
2263 	list_for_each_entry_rcu(param, list, action) {
2264 		if (bacmp(&param->addr, addr) == 0 &&
2265 		    param->addr_type == addr_type) {
2266 			rcu_read_unlock();
2267 			return param;
2268 		}
2269 	}
2270 
2271 	rcu_read_unlock();
2272 
2273 	return NULL;
2274 }
2275 
2276 /* This function requires the caller holds hdev->lock */
2277 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2278 {
2279 	if (list_empty(&param->action))
2280 		return;
2281 
2282 	list_del_rcu(&param->action);
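	/* Wait for concurrent RCU readers (e.g. hci_pend_le_action_lookup)
	 * to finish before the entry is reinitialized below.
	 */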
2283 	synchronize_rcu();
2284 	INIT_LIST_HEAD(&param->action);
2285 }
2286 
2287 /* This function requires the caller holds hdev->lock */
2288 void hci_pend_le_list_add(struct hci_conn_params *param,
2289 			  struct list_head *list)
2290 {
2291 	list_add_rcu(&param->action, list);
2292 }
2293 
2294 /* This function requires the caller holds hdev->lock */
2295 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2296 					    bdaddr_t *addr, u8 addr_type)
2297 {
2298 	struct hci_conn_params *params;
2299 
2300 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2301 	if (params)
2302 		return params;
2303 
2304 	params = kzalloc(sizeof(*params), GFP_KERNEL);
2305 	if (!params) {
2306 		bt_dev_err(hdev, "out of memory");
2307 		return NULL;
2308 	}
2309 
2310 	bacpy(&params->addr, addr);
2311 	params->addr_type = addr_type;
2312 
2313 	list_add(&params->list, &hdev->le_conn_params);
2314 	INIT_LIST_HEAD(&params->action);
2315 
2316 	params->conn_min_interval = hdev->le_conn_min_interval;
2317 	params->conn_max_interval = hdev->le_conn_max_interval;
2318 	params->conn_latency = hdev->le_conn_latency;
2319 	params->supervision_timeout = hdev->le_supv_timeout;
2320 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
2321 
2322 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2323 
2324 	return params;
2325 }
2326 
2327 void hci_conn_params_free(struct hci_conn_params *params)
2328 {
2329 	hci_pend_le_list_del_init(params);
2330 
2331 	if (params->conn) {
2332 		hci_conn_drop(params->conn);
2333 		hci_conn_put(params->conn);
2334 	}
2335 
2336 	list_del(&params->list);
2337 	kfree(params);
2338 }
2339 
2340 /* This function requires the caller holds hdev->lock */
2341 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2342 {
2343 	struct hci_conn_params *params;
2344 
2345 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2346 	if (!params)
2347 		return;
2348 
2349 	hci_conn_params_free(params);
2350 
2351 	hci_update_passive_scan(hdev);
2352 
2353 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2354 }
2355 
2356 /* This function requires the caller holds hdev->lock */
2357 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2358 {
2359 	struct hci_conn_params *params, *tmp;
2360 
2361 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2362 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2363 			continue;
2364 
2365 		/* If trying to establish a one-time connection to a disabled
2366 		 * device, keep the params but mark them as explicit (just once).
2367 		 */
2368 		if (params->explicit_connect) {
2369 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2370 			continue;
2371 		}
2372 
2373 		hci_conn_params_free(params);
2374 	}
2375 
2376 	BT_DBG("All LE disabled connection parameters were removed");
2377 }
2378 
2379 /* This function requires the caller holds hdev->lock */
2380 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2381 {
2382 	struct hci_conn_params *params, *tmp;
2383 
2384 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2385 		hci_conn_params_free(params);
2386 
2387 	BT_DBG("All LE connection parameters were removed");
2388 }
2389 
2390 /* Copy the Identity Address of the controller.
2391  *
2392  * If the controller has a public BD_ADDR, then by default use that one.
2393  * If this is an LE-only controller without a public address, default to
2394  * the static random address.
2395  *
2396  * For debugging purposes it is possible to force controllers with a
2397  * public address to use the static random address instead.
2398  *
2399  * In case BR/EDR has been disabled on a dual-mode controller and
2400  * userspace has configured a static address, then that address
2401  * becomes the identity address instead of the public BR/EDR address.
2402  */
2403 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2404 			       u8 *bdaddr_type)
2405 {
2406 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2407 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2408 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2409 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2410 		bacpy(bdaddr, &hdev->static_addr);
2411 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
2412 	} else {
2413 		bacpy(bdaddr, &hdev->bdaddr);
2414 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
2415 	}
2416 }
2417 
2418 static void hci_clear_wake_reason(struct hci_dev *hdev)
2419 {
2420 	hci_dev_lock(hdev);
2421 
2422 	hdev->wake_reason = 0;
2423 	bacpy(&hdev->wake_addr, BDADDR_ANY);
2424 	hdev->wake_addr_type = 0;
2425 
2426 	hci_dev_unlock(hdev);
2427 }
2428 
2429 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2430 				void *data)
2431 {
2432 	struct hci_dev *hdev =
2433 		container_of(nb, struct hci_dev, suspend_notifier);
2434 	int ret = 0;
2435 
2436 	/* Userspace has full control of this device. Do nothing. */
2437 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2438 		return NOTIFY_DONE;
2439 
2440 	/* To avoid a potential race with hci_unregister_dev. */
2441 	hci_dev_hold(hdev);
2442 
2443 	if (action == PM_SUSPEND_PREPARE)
2444 		ret = hci_suspend_dev(hdev);
2445 	else if (action == PM_POST_SUSPEND)
2446 		ret = hci_resume_dev(hdev);
2447 
2448 	if (ret)
2449 		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2450 			   action, ret);
2451 
2452 	hci_dev_put(hdev);
2453 	return NOTIFY_DONE;
2454 }
2455 
2456 /* Alloc HCI device */
2457 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2458 {
2459 	struct hci_dev *hdev;
2460 	unsigned int alloc_size;
2461 
2462 	alloc_size = sizeof(*hdev);
2463 	if (sizeof_priv) {
2464 		/* Fixme: May need ALIGN-ment? */
2465 		alloc_size += sizeof_priv;
2466 	}
2467 
2468 	hdev = kzalloc(alloc_size, GFP_KERNEL);
2469 	if (!hdev)
2470 		return NULL;
2471 
2472 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2473 	hdev->esco_type = (ESCO_HV1);
2474 	hdev->link_mode = (HCI_LM_ACCEPT);
2475 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
2476 	hdev->io_capability = 0x03;	/* No Input No Output */
2477 	hdev->manufacturer = 0xffff;	/* Default to internal use */
2478 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2479 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2480 	hdev->adv_instance_cnt = 0;
2481 	hdev->cur_adv_instance = 0x00;
2482 	hdev->adv_instance_timeout = 0;
2483 
2484 	hdev->advmon_allowlist_duration = 300;
2485 	hdev->advmon_no_filter_duration = 500;
2486 	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */
2487 
2488 	hdev->sniff_max_interval = 800;
2489 	hdev->sniff_min_interval = 80;
2490 
2491 	hdev->le_adv_channel_map = 0x07;
2492 	hdev->le_adv_min_interval = 0x0800;
2493 	hdev->le_adv_max_interval = 0x0800;
2494 	hdev->le_scan_interval = 0x0060;
2495 	hdev->le_scan_window = 0x0030;
2496 	hdev->le_scan_int_suspend = 0x0400;
2497 	hdev->le_scan_window_suspend = 0x0012;
2498 	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2499 	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2500 	hdev->le_scan_int_adv_monitor = 0x0060;
2501 	hdev->le_scan_window_adv_monitor = 0x0030;
2502 	hdev->le_scan_int_connect = 0x0060;
2503 	hdev->le_scan_window_connect = 0x0060;
2504 	hdev->le_conn_min_interval = 0x0018;
2505 	hdev->le_conn_max_interval = 0x0028;
2506 	hdev->le_conn_latency = 0x0000;
2507 	hdev->le_supv_timeout = 0x002a;
2508 	hdev->le_def_tx_len = 0x001b;
2509 	hdev->le_def_tx_time = 0x0148;
2510 	hdev->le_max_tx_len = 0x001b;
2511 	hdev->le_max_tx_time = 0x0148;
2512 	hdev->le_max_rx_len = 0x001b;
2513 	hdev->le_max_rx_time = 0x0148;
2514 	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2515 	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2516 	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2517 	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2518 	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2519 	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2520 	hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2521 	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2522 	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2523 
2524 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2525 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2526 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2527 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2528 	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2529 	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2530 
2531 	/* default 1.28 sec page scan */
2532 	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2533 	hdev->def_page_scan_int = 0x0800;
2534 	hdev->def_page_scan_window = 0x0012;
2535 
2536 	mutex_init(&hdev->lock);
2537 	mutex_init(&hdev->req_lock);
2538 
2539 	ida_init(&hdev->unset_handle_ida);
2540 
2541 	INIT_LIST_HEAD(&hdev->mesh_pending);
2542 	INIT_LIST_HEAD(&hdev->mgmt_pending);
2543 	INIT_LIST_HEAD(&hdev->reject_list);
2544 	INIT_LIST_HEAD(&hdev->accept_list);
2545 	INIT_LIST_HEAD(&hdev->uuids);
2546 	INIT_LIST_HEAD(&hdev->link_keys);
2547 	INIT_LIST_HEAD(&hdev->long_term_keys);
2548 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2549 	INIT_LIST_HEAD(&hdev->remote_oob_data);
2550 	INIT_LIST_HEAD(&hdev->le_accept_list);
2551 	INIT_LIST_HEAD(&hdev->le_resolv_list);
2552 	INIT_LIST_HEAD(&hdev->le_conn_params);
2553 	INIT_LIST_HEAD(&hdev->pend_le_conns);
2554 	INIT_LIST_HEAD(&hdev->pend_le_reports);
2555 	INIT_LIST_HEAD(&hdev->conn_hash.list);
2556 	INIT_LIST_HEAD(&hdev->adv_instances);
2557 	INIT_LIST_HEAD(&hdev->blocked_keys);
2558 	INIT_LIST_HEAD(&hdev->monitored_devices);
2559 
2560 	INIT_LIST_HEAD(&hdev->local_codecs);
2561 	INIT_WORK(&hdev->rx_work, hci_rx_work);
2562 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2563 	INIT_WORK(&hdev->tx_work, hci_tx_work);
2564 	INIT_WORK(&hdev->power_on, hci_power_on);
2565 	INIT_WORK(&hdev->error_reset, hci_error_reset);
2566 
2567 	hci_cmd_sync_init(hdev);
2568 
2569 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2570 
2571 	skb_queue_head_init(&hdev->rx_q);
2572 	skb_queue_head_init(&hdev->cmd_q);
2573 	skb_queue_head_init(&hdev->raw_q);
2574 
2575 	init_waitqueue_head(&hdev->req_wait_q);
2576 
2577 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2578 	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2579 
2580 	hci_devcd_setup(hdev);
2581 	hci_request_setup(hdev);
2582 
2583 	hci_init_sysfs(hdev);
2584 	discovery_init(hdev);
2585 
2586 	return hdev;
2587 }
2588 EXPORT_SYMBOL(hci_alloc_dev_priv);
2589 
2590 /* Free HCI device */
2591 void hci_free_dev(struct hci_dev *hdev)
2592 {
2593 	/* will free via device release */
2594 	put_device(&hdev->dev);
2595 }
2596 EXPORT_SYMBOL(hci_free_dev);
2597 
2598 /* Register HCI device */
2599 int hci_register_dev(struct hci_dev *hdev)
2600 {
2601 	int id, error;
2602 
2603 	if (!hdev->open || !hdev->close || !hdev->send)
2604 		return -EINVAL;
2605 
2606 	/* Do not allow HCI_AMP devices to register at index 0,
2607 	 * so the index can be used as the AMP controller ID.
2608 	 */
2609 	switch (hdev->dev_type) {
2610 	case HCI_PRIMARY:
2611 		id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2612 		break;
2613 	case HCI_AMP:
2614 		id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2615 		break;
2616 	default:
2617 		return -EINVAL;
2618 	}
2619 
2620 	if (id < 0)
2621 		return id;
2622 
2623 	error = dev_set_name(&hdev->dev, "hci%u", id);
2624 	if (error)
2625 		return error;
2626 
2627 	hdev->name = dev_name(&hdev->dev);
2628 	hdev->id = id;
2629 
2630 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2631 
2632 	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2633 	if (!hdev->workqueue) {
2634 		error = -ENOMEM;
2635 		goto err;
2636 	}
2637 
2638 	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2639 						      hdev->name);
2640 	if (!hdev->req_workqueue) {
2641 		destroy_workqueue(hdev->workqueue);
2642 		error = -ENOMEM;
2643 		goto err;
2644 	}
2645 
2646 	if (!IS_ERR_OR_NULL(bt_debugfs))
2647 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2648 
2649 	error = device_add(&hdev->dev);
2650 	if (error < 0)
2651 		goto err_wqueue;
2652 
2653 	hci_leds_init(hdev);
2654 
2655 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2656 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2657 				    hdev);
2658 	if (hdev->rfkill) {
2659 		if (rfkill_register(hdev->rfkill) < 0) {
2660 			rfkill_destroy(hdev->rfkill);
2661 			hdev->rfkill = NULL;
2662 		}
2663 	}
2664 
2665 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2666 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2667 
2668 	hci_dev_set_flag(hdev, HCI_SETUP);
2669 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2670 
2671 	if (hdev->dev_type == HCI_PRIMARY) {
2672 		/* Assume BR/EDR support until proven otherwise (such as
2673 		 * through reading supported features during init).
2674 		 */
2675 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2676 	}
2677 
2678 	write_lock(&hci_dev_list_lock);
2679 	list_add(&hdev->list, &hci_dev_list);
2680 	write_unlock(&hci_dev_list_lock);
2681 
2682 	/* Devices that are marked for raw-only usage are unconfigured
2683 	 * and should not be included in normal operation.
2684 	 */
2685 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2686 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2687 
2688 	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
2689 	 * callback.
2690 	 */
2691 	if (hdev->wakeup)
2692 		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2693 
2694 	hci_sock_dev_event(hdev, HCI_DEV_REG);
2695 	hci_dev_hold(hdev);
2696 
2697 	error = hci_register_suspend_notifier(hdev);
2698 	if (error)
2699 		BT_WARN("register suspend notifier failed error:%d\n", error);
2700 
2701 	queue_work(hdev->req_workqueue, &hdev->power_on);
2702 
2703 	idr_init(&hdev->adv_monitors_idr);
2704 	msft_register(hdev);
2705 
2706 	return id;
2707 
2708 err_wqueue:
2709 	debugfs_remove_recursive(hdev->debugfs);
2710 	destroy_workqueue(hdev->workqueue);
2711 	destroy_workqueue(hdev->req_workqueue);
2712 err:
2713 	ida_simple_remove(&hci_index_ida, hdev->id);
2714 
2715 	return error;
2716 }
2717 EXPORT_SYMBOL(hci_register_dev);
2718 
2719 /* Unregister HCI device */
2720 void hci_unregister_dev(struct hci_dev *hdev)
2721 {
2722 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2723 
2724 	mutex_lock(&hdev->unregister_lock);
2725 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
2726 	mutex_unlock(&hdev->unregister_lock);
2727 
2728 	write_lock(&hci_dev_list_lock);
2729 	list_del(&hdev->list);
2730 	write_unlock(&hci_dev_list_lock);
2731 
2732 	cancel_work_sync(&hdev->power_on);
2733 
2734 	hci_cmd_sync_clear(hdev);
2735 
2736 	hci_unregister_suspend_notifier(hdev);
2737 
2738 	msft_unregister(hdev);
2739 
2740 	hci_dev_do_close(hdev);
2741 
2742 	if (!test_bit(HCI_INIT, &hdev->flags) &&
2743 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
2744 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2745 		hci_dev_lock(hdev);
2746 		mgmt_index_removed(hdev);
2747 		hci_dev_unlock(hdev);
2748 	}
2749 
2750 	/* mgmt_index_removed should take care of emptying the
2751 	 * pending list */
2752 	BUG_ON(!list_empty(&hdev->mgmt_pending));
2753 
2754 	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2755 
2756 	if (hdev->rfkill) {
2757 		rfkill_unregister(hdev->rfkill);
2758 		rfkill_destroy(hdev->rfkill);
2759 	}
2760 
2761 	device_del(&hdev->dev);
2762 	/* Actual cleanup is deferred until hci_release_dev(). */
2763 	hci_dev_put(hdev);
2764 }
2765 EXPORT_SYMBOL(hci_unregister_dev);
2766 
2767 /* Release HCI device */
2768 void hci_release_dev(struct hci_dev *hdev)
2769 {
2770 	debugfs_remove_recursive(hdev->debugfs);
2771 	kfree_const(hdev->hw_info);
2772 	kfree_const(hdev->fw_info);
2773 
2774 	destroy_workqueue(hdev->workqueue);
2775 	destroy_workqueue(hdev->req_workqueue);
2776 
2777 	hci_dev_lock(hdev);
2778 	hci_bdaddr_list_clear(&hdev->reject_list);
2779 	hci_bdaddr_list_clear(&hdev->accept_list);
2780 	hci_uuids_clear(hdev);
2781 	hci_link_keys_clear(hdev);
2782 	hci_smp_ltks_clear(hdev);
2783 	hci_smp_irks_clear(hdev);
2784 	hci_remote_oob_data_clear(hdev);
2785 	hci_adv_instances_clear(hdev);
2786 	hci_adv_monitors_clear(hdev);
2787 	hci_bdaddr_list_clear(&hdev->le_accept_list);
2788 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2789 	hci_conn_params_clear_all(hdev);
2790 	hci_discovery_filter_clear(hdev);
2791 	hci_blocked_keys_clear(hdev);
2792 	hci_codec_list_clear(&hdev->local_codecs);
2793 	hci_dev_unlock(hdev);
2794 
2795 	ida_destroy(&hdev->unset_handle_ida);
2796 	ida_simple_remove(&hci_index_ida, hdev->id);
2797 	kfree_skb(hdev->sent_cmd);
2798 	kfree_skb(hdev->recv_event);
2799 	kfree(hdev);
2800 }
2801 EXPORT_SYMBOL(hci_release_dev);
2802 
2803 int hci_register_suspend_notifier(struct hci_dev *hdev)
2804 {
2805 	int ret = 0;
2806 
2807 	if (!hdev->suspend_notifier.notifier_call &&
2808 	    !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2809 		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2810 		ret = register_pm_notifier(&hdev->suspend_notifier);
2811 	}
2812 
2813 	return ret;
2814 }
2815 
2816 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2817 {
2818 	int ret = 0;
2819 
2820 	if (hdev->suspend_notifier.notifier_call) {
2821 		ret = unregister_pm_notifier(&hdev->suspend_notifier);
2822 		if (!ret)
2823 			hdev->suspend_notifier.notifier_call = NULL;
2824 	}
2825 
2826 	return ret;
2827 }
2828 
2829 /* Suspend HCI device */
2830 int hci_suspend_dev(struct hci_dev *hdev)
2831 {
2832 	int ret;
2833 
2834 	bt_dev_dbg(hdev, "");
2835 
2836 	/* Suspend should only act when the device is powered. */
2837 	if (!hdev_is_powered(hdev) ||
2838 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2839 		return 0;
2840 
2841 	/* If powering down don't attempt to suspend */
2842 	if (mgmt_powering_down(hdev))
2843 		return 0;
2844 
2845 	/* Cancel potentially blocking sync operation before suspend */
2846 	__hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
2847 
2848 	hci_req_sync_lock(hdev);
2849 	ret = hci_suspend_sync(hdev);
2850 	hci_req_sync_unlock(hdev);
2851 
2852 	hci_clear_wake_reason(hdev);
2853 	mgmt_suspending(hdev, hdev->suspend_state);
2854 
2855 	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2856 	return ret;
2857 }
2858 EXPORT_SYMBOL(hci_suspend_dev);
2859 
2860 /* Resume HCI device */
2861 int hci_resume_dev(struct hci_dev *hdev)
2862 {
2863 	int ret;
2864 
2865 	bt_dev_dbg(hdev, "");
2866 
2867 	/* Resume should only act when the device is powered. */
2868 	if (!hdev_is_powered(hdev) ||
2869 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2870 		return 0;
2871 
2872 	/* If powering down don't attempt to resume */
2873 	if (mgmt_powering_down(hdev))
2874 		return 0;
2875 
2876 	hci_req_sync_lock(hdev);
2877 	ret = hci_resume_sync(hdev);
2878 	hci_req_sync_unlock(hdev);
2879 
2880 	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2881 		      hdev->wake_addr_type);
2882 
2883 	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2884 	return ret;
2885 }
2886 EXPORT_SYMBOL(hci_resume_dev);
2887 
2888 /* Reset HCI device */
2889 int hci_reset_dev(struct hci_dev *hdev)
2890 {
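	/* Event layout: event code (HCI_EV_HARDWARE_ERROR), parameter total
	 * length (0x01) and the hardware error code (0x00).
	 */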
2891 	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2892 	struct sk_buff *skb;
2893 
2894 	skb = bt_skb_alloc(3, GFP_ATOMIC);
2895 	if (!skb)
2896 		return -ENOMEM;
2897 
2898 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2899 	skb_put_data(skb, hw_err, 3);
2900 
2901 	bt_dev_err(hdev, "Injecting HCI hardware error event");
2902 
2903 	/* Send Hardware Error to upper stack */
2904 	return hci_recv_frame(hdev, skb);
2905 }
2906 EXPORT_SYMBOL(hci_reset_dev);
2907 
2908 /* Receive frame from HCI drivers */
2909 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2910 {
2911 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2912 		      && !test_bit(HCI_INIT, &hdev->flags))) {
2913 		kfree_skb(skb);
2914 		return -ENXIO;
2915 	}
2916 
2917 	switch (hci_skb_pkt_type(skb)) {
2918 	case HCI_EVENT_PKT:
2919 		break;
2920 	case HCI_ACLDATA_PKT:
2921 		/* Detect if ISO packet has been sent as ACL */
2922 		if (hci_conn_num(hdev, ISO_LINK)) {
2923 			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2924 			__u8 type;
2925 
2926 			type = hci_conn_lookup_type(hdev, hci_handle(handle));
2927 			if (type == ISO_LINK)
2928 				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2929 		}
2930 		break;
2931 	case HCI_SCODATA_PKT:
2932 		break;
2933 	case HCI_ISODATA_PKT:
2934 		break;
2935 	default:
2936 		kfree_skb(skb);
2937 		return -EINVAL;
2938 	}
2939 
2940 	/* Incoming skb */
2941 	bt_cb(skb)->incoming = 1;
2942 
2943 	/* Time stamp */
2944 	__net_timestamp(skb);
2945 
2946 	skb_queue_tail(&hdev->rx_q, skb);
2947 	queue_work(hdev->workqueue, &hdev->rx_work);
2948 
2949 	return 0;
2950 }
2951 EXPORT_SYMBOL(hci_recv_frame);
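
/* Illustrative sketch of a driver receive path ("buf" and "count" are
 * assumed driver-side names): the driver allocates an skb, tags the packet
 * type and hands it to hci_recv_frame(), roughly:
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, count);
 *	err = hci_recv_frame(hdev, skb);
 */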
2952 
2953 /* Receive diagnostic message from HCI drivers */
2954 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2955 {
2956 	/* Mark as diagnostic packet */
2957 	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2958 
2959 	/* Time stamp */
2960 	__net_timestamp(skb);
2961 
2962 	skb_queue_tail(&hdev->rx_q, skb);
2963 	queue_work(hdev->workqueue, &hdev->rx_work);
2964 
2965 	return 0;
2966 }
2967 EXPORT_SYMBOL(hci_recv_diag);
2968 
2969 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2970 {
2971 	va_list vargs;
2972 
2973 	va_start(vargs, fmt);
2974 	kfree_const(hdev->hw_info);
2975 	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2976 	va_end(vargs);
2977 }
2978 EXPORT_SYMBOL(hci_set_hw_info);
2979 
2980 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2981 {
2982 	va_list vargs;
2983 
2984 	va_start(vargs, fmt);
2985 	kfree_const(hdev->fw_info);
2986 	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2987 	va_end(vargs);
2988 }
2989 EXPORT_SYMBOL(hci_set_fw_info);
2990 
2991 /* ---- Interface to upper protocols ---- */
2992 
2993 int hci_register_cb(struct hci_cb *cb)
2994 {
2995 	BT_DBG("%p name %s", cb, cb->name);
2996 
2997 	mutex_lock(&hci_cb_list_lock);
2998 	list_add_tail(&cb->list, &hci_cb_list);
2999 	mutex_unlock(&hci_cb_list_lock);
3000 
3001 	return 0;
3002 }
3003 EXPORT_SYMBOL(hci_register_cb);
3004 
3005 int hci_unregister_cb(struct hci_cb *cb)
3006 {
3007 	BT_DBG("%p name %s", cb, cb->name);
3008 
3009 	mutex_lock(&hci_cb_list_lock);
3010 	list_del(&cb->list);
3011 	mutex_unlock(&hci_cb_list_lock);
3012 
3013 	return 0;
3014 }
3015 EXPORT_SYMBOL(hci_unregister_cb);
3016 
3017 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3018 {
3019 	int err;
3020 
3021 	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3022 	       skb->len);
3023 
3024 	/* Time stamp */
3025 	__net_timestamp(skb);
3026 
3027 	/* Send copy to monitor */
3028 	hci_send_to_monitor(hdev, skb);
3029 
3030 	if (atomic_read(&hdev->promisc)) {
3031 		/* Send copy to the sockets */
3032 		hci_send_to_sock(hdev, skb);
3033 	}
3034 
3035 	/* Get rid of skb owner, prior to sending to the driver. */
3036 	skb_orphan(skb);
3037 
3038 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3039 		kfree_skb(skb);
3040 		return -EINVAL;
3041 	}
3042 
3043 	err = hdev->send(hdev, skb);
3044 	if (err < 0) {
3045 		bt_dev_err(hdev, "sending frame failed (%d)", err);
3046 		kfree_skb(skb);
3047 		return err;
3048 	}
3049 
3050 	return 0;
3051 }
3052 
3053 /* Send HCI command */
3054 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3055 		 const void *param)
3056 {
3057 	struct sk_buff *skb;
3058 
3059 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3060 
3061 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3062 	if (!skb) {
3063 		bt_dev_err(hdev, "no memory for command");
3064 		return -ENOMEM;
3065 	}
3066 
3067 	/* Stand-alone HCI commands must be flagged as
3068 	 * single-command requests.
3069 	 */
3070 	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3071 
3072 	skb_queue_tail(&hdev->cmd_q, skb);
3073 	queue_work(hdev->workqueue, &hdev->cmd_work);
3074 
3075 	return 0;
3076 }
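
/* Example (illustrative): queueing a parameterless HCI Reset would look
 * like
 *
 *	err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * The command is placed on cmd_q and transmitted from hci_cmd_work().
 */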
3077 
3078 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3079 		   const void *param)
3080 {
3081 	struct sk_buff *skb;
3082 
3083 	if (hci_opcode_ogf(opcode) != 0x3f) {
3084 		/* A controller receiving a command shall respond with either
3085 		 * a Command Status Event or a Command Complete Event.
3086 		 * Therefore, all standard HCI commands must be sent via the
3087 		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3088 		 * Some vendors do not comply with this rule for vendor-specific
3089 		 * commands and do not return any event. We want to support
3090 		 * unresponded commands for such cases only.
3091 		 */
3092 		bt_dev_err(hdev, "unresponded command not supported");
3093 		return -EINVAL;
3094 	}
3095 
3096 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3097 	if (!skb) {
3098 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3099 			   opcode);
3100 		return -ENOMEM;
3101 	}
3102 
3103 	hci_send_frame(hdev, skb);
3104 
3105 	return 0;
3106 }
3107 EXPORT_SYMBOL(__hci_cmd_send);
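
/* Illustrative sketch (the OCF 0x0001 below is a made-up example): sending
 * an unresponded vendor-specific command, OGF 0x3f, might look like
 *
 *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			     sizeof(param), &param);
 */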
3108 
3109 /* Get data from the previously sent command */
3110 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3111 {
3112 	struct hci_command_hdr *hdr;
3113 
3114 	if (!hdev->sent_cmd)
3115 		return NULL;
3116 
3117 	hdr = (void *) hdev->sent_cmd->data;
3118 
3119 	if (hdr->opcode != cpu_to_le16(opcode))
3120 		return NULL;
3121 
3122 	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3123 
3124 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3125 }
3126 
3127 /* Get data from last received event */
3128 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3129 {
3130 	struct hci_event_hdr *hdr;
3131 	int offset;
3132 
3133 	if (!hdev->recv_event)
3134 		return NULL;
3135 
3136 	hdr = (void *)hdev->recv_event->data;
3137 	offset = sizeof(*hdr);
3138 
3139 	if (hdr->evt != event) {
3140 		/* In case of an LE meta event, check whether the subevent matches */
3141 		if (hdr->evt == HCI_EV_LE_META) {
3142 			struct hci_ev_le_meta *ev;
3143 
3144 			ev = (void *)hdev->recv_event->data + offset;
3145 			offset += sizeof(*ev);
3146 			if (ev->subevent == event)
3147 				goto found;
3148 		}
3149 		return NULL;
3150 	}
3151 
3152 found:
3153 	bt_dev_dbg(hdev, "event 0x%2.2x", event);
3154 
3155 	return hdev->recv_event->data + offset;
3156 }
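
/* Example (illustrative): after an LE Connection Complete event has been
 * received, its parameters can be fetched with
 *
 *	struct hci_ev_le_conn_complete *ev;
 *
 *	ev = hci_recv_event_data(hdev, HCI_EV_LE_CONN_COMPLETE);
 *
 * For LE meta events the subevent code is matched and the returned pointer
 * is advanced past the meta event header.
 */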
3157 
3158 /* Send ACL data */
3159 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3160 {
3161 	struct hci_acl_hdr *hdr;
3162 	int len = skb->len;
3163 
3164 	skb_push(skb, HCI_ACL_HDR_SIZE);
3165 	skb_reset_transport_header(skb);
3166 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3167 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3168 	hdr->dlen   = cpu_to_le16(len);
3169 }
3170 
3171 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3172 			  struct sk_buff *skb, __u16 flags)
3173 {
3174 	struct hci_conn *conn = chan->conn;
3175 	struct hci_dev *hdev = conn->hdev;
3176 	struct sk_buff *list;
3177 
3178 	skb->len = skb_headlen(skb);
3179 	skb->data_len = 0;
3180 
3181 	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3182 
3183 	switch (hdev->dev_type) {
3184 	case HCI_PRIMARY:
3185 		hci_add_acl_hdr(skb, conn->handle, flags);
3186 		break;
3187 	case HCI_AMP:
3188 		hci_add_acl_hdr(skb, chan->handle, flags);
3189 		break;
3190 	default:
3191 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3192 		return;
3193 	}
3194 
3195 	list = skb_shinfo(skb)->frag_list;
3196 	if (!list) {
3197 		/* Non fragmented */
3198 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3199 
3200 		skb_queue_tail(queue, skb);
3201 	} else {
3202 		/* Fragmented */
3203 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3204 
3205 		skb_shinfo(skb)->frag_list = NULL;
3206 
3207 		/* Queue all fragments atomically. We need to use spin_lock_bh
3208 		 * here because of 6LoWPAN links, as there this function is
3209 		 * called from softirq and using normal spin lock could cause
3210 		 * deadlocks.
3211 		 */
3212 		spin_lock_bh(&queue->lock);
3213 
3214 		__skb_queue_tail(queue, skb);
3215 
3216 		flags &= ~ACL_START;
3217 		flags |= ACL_CONT;
3218 		do {
3219 			skb = list; list = list->next;
3220 
3221 			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3222 			hci_add_acl_hdr(skb, conn->handle, flags);
3223 
3224 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3225 
3226 			__skb_queue_tail(queue, skb);
3227 		} while (list);
3228 
3229 		spin_unlock_bh(&queue->lock);
3230 	}
3231 }
3232 
3233 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3234 {
3235 	struct hci_dev *hdev = chan->conn->hdev;
3236 
3237 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3238 
3239 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3240 
3241 	queue_work(hdev->workqueue, &hdev->tx_work);
3242 }
3243 
3244 /* Send SCO data */
3245 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3246 {
3247 	struct hci_dev *hdev = conn->hdev;
3248 	struct hci_sco_hdr hdr;
3249 
3250 	BT_DBG("%s len %d", hdev->name, skb->len);
3251 
3252 	hdr.handle = cpu_to_le16(conn->handle);
3253 	hdr.dlen   = skb->len;
3254 
3255 	skb_push(skb, HCI_SCO_HDR_SIZE);
3256 	skb_reset_transport_header(skb);
3257 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3258 
3259 	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3260 
3261 	skb_queue_tail(&conn->data_q, skb);
3262 	queue_work(hdev->workqueue, &hdev->tx_work);
3263 }
3264 
3265 /* Send ISO data */
3266 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3267 {
3268 	struct hci_iso_hdr *hdr;
3269 	int len = skb->len;
3270 
3271 	skb_push(skb, HCI_ISO_HDR_SIZE);
3272 	skb_reset_transport_header(skb);
3273 	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3274 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3275 	hdr->dlen   = cpu_to_le16(len);
3276 }
3277 
3278 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3279 			  struct sk_buff *skb)
3280 {
3281 	struct hci_dev *hdev = conn->hdev;
3282 	struct sk_buff *list;
3283 	__u16 flags;
3284 
3285 	skb->len = skb_headlen(skb);
3286 	skb->data_len = 0;
3287 
3288 	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3289 
3290 	list = skb_shinfo(skb)->frag_list;
3291 
3292 	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3293 	hci_add_iso_hdr(skb, conn->handle, flags);
3294 
3295 	if (!list) {
3296 		/* Non fragmented */
3297 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3298 
3299 		skb_queue_tail(queue, skb);
3300 	} else {
3301 		/* Fragmented */
3302 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3303 
3304 		skb_shinfo(skb)->frag_list = NULL;
3305 
3306 		__skb_queue_tail(queue, skb);
3307 
3308 		do {
3309 			skb = list; list = list->next;
3310 
3311 			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3312 			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3313 						   0x00);
3314 			hci_add_iso_hdr(skb, conn->handle, flags);
3315 
3316 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3317 
3318 			__skb_queue_tail(queue, skb);
3319 		} while (list);
3320 	}
3321 }
3322 
3323 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3324 {
3325 	struct hci_dev *hdev = conn->hdev;
3326 
3327 	BT_DBG("%s len %d", hdev->name, skb->len);
3328 
3329 	hci_queue_iso(conn, &conn->data_q, skb);
3330 
3331 	queue_work(hdev->workqueue, &hdev->tx_work);
3332 }
3333 
3334 /* ---- HCI TX task (outgoing data) ---- */
3335 
3336 /* HCI Connection scheduler */
3337 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3338 {
3339 	struct hci_dev *hdev;
3340 	int cnt, q;
3341 
3342 	if (!conn) {
3343 		*quote = 0;
3344 		return;
3345 	}
3346 
3347 	hdev = conn->hdev;
3348 
3349 	switch (conn->type) {
3350 	case ACL_LINK:
3351 		cnt = hdev->acl_cnt;
3352 		break;
3353 	case AMP_LINK:
3354 		cnt = hdev->block_cnt;
3355 		break;
3356 	case SCO_LINK:
3357 	case ESCO_LINK:
3358 		cnt = hdev->sco_cnt;
3359 		break;
3360 	case LE_LINK:
3361 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3362 		break;
3363 	case ISO_LINK:
3364 		cnt = hdev->iso_mtu ? hdev->iso_cnt :
3365 			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3366 		break;
3367 	default:
3368 		cnt = 0;
3369 		bt_dev_err(hdev, "unknown link type %d", conn->type);
3370 	}
3371 
3372 	q = cnt / num;
3373 	*quote = q ? q : 1;
3374 }
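
/* Example: with cnt = 8 free controller buffers shared by num = 3
 * connections with data queued, each connection gets a quota of
 * 8 / 3 = 2 packets per scheduling round; the quota never drops below 1.
 */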
3375 
3376 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3377 				     int *quote)
3378 {
3379 	struct hci_conn_hash *h = &hdev->conn_hash;
3380 	struct hci_conn *conn = NULL, *c;
3381 	unsigned int num = 0, min = ~0;
3382 
3383 	/* We don't have to lock device here. Connections are always
3384 	 * added and removed with TX task disabled. */
3385 
3386 	rcu_read_lock();
3387 
3388 	list_for_each_entry_rcu(c, &h->list, list) {
3389 		if (c->type != type || skb_queue_empty(&c->data_q))
3390 			continue;
3391 
3392 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3393 			continue;
3394 
3395 		num++;
3396 
3397 		if (c->sent < min) {
3398 			min  = c->sent;
3399 			conn = c;
3400 		}
3401 
3402 		if (hci_conn_num(hdev, type) == num)
3403 			break;
3404 	}
3405 
3406 	rcu_read_unlock();
3407 
3408 	hci_quote_sent(conn, num, quote);
3409 
3410 	BT_DBG("conn %p quote %d", conn, *quote);
3411 	return conn;
3412 }
3413 
3414 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3415 {
3416 	struct hci_conn_hash *h = &hdev->conn_hash;
3417 	struct hci_conn *c;
3418 
3419 	bt_dev_err(hdev, "link tx timeout");
3420 
3421 	rcu_read_lock();
3422 
3423 	/* Kill stalled connections */
3424 	list_for_each_entry_rcu(c, &h->list, list) {
3425 		if (c->type == type && c->sent) {
3426 			bt_dev_err(hdev, "killing stalled connection %pMR",
3427 				   &c->dst);
3428 			/* hci_disconnect might sleep, so, we have to release
3429 			 * the RCU read lock before calling it.
3430 			 */
3431 			rcu_read_unlock();
3432 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3433 			rcu_read_lock();
3434 		}
3435 	}
3436 
3437 	rcu_read_unlock();
3438 }
3439 
3440 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3441 				      int *quote)
3442 {
3443 	struct hci_conn_hash *h = &hdev->conn_hash;
3444 	struct hci_chan *chan = NULL;
3445 	unsigned int num = 0, min = ~0, cur_prio = 0;
3446 	struct hci_conn *conn;
3447 	int conn_num = 0;
3448 
3449 	BT_DBG("%s", hdev->name);
3450 
3451 	rcu_read_lock();
3452 
3453 	list_for_each_entry_rcu(conn, &h->list, list) {
3454 		struct hci_chan *tmp;
3455 
3456 		if (conn->type != type)
3457 			continue;
3458 
3459 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3460 			continue;
3461 
3462 		conn_num++;
3463 
3464 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3465 			struct sk_buff *skb;
3466 
3467 			if (skb_queue_empty(&tmp->data_q))
3468 				continue;
3469 
3470 			skb = skb_peek(&tmp->data_q);
3471 			if (skb->priority < cur_prio)
3472 				continue;
3473 
3474 			if (skb->priority > cur_prio) {
3475 				num = 0;
3476 				min = ~0;
3477 				cur_prio = skb->priority;
3478 			}
3479 
3480 			num++;
3481 
3482 			if (conn->sent < min) {
3483 				min  = conn->sent;
3484 				chan = tmp;
3485 			}
3486 		}
3487 
3488 		if (hci_conn_num(hdev, type) == conn_num)
3489 			break;
3490 	}
3491 
3492 	rcu_read_unlock();
3493 
3494 	if (!chan)
3495 		return NULL;
3496 
3497 	hci_quote_sent(chan->conn, num, quote);
3498 
3499 	BT_DBG("chan %p quote %d", chan, *quote);
3500 	return chan;
3501 }
3502 
3503 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3504 {
3505 	struct hci_conn_hash *h = &hdev->conn_hash;
3506 	struct hci_conn *conn;
3507 	int num = 0;
3508 
3509 	BT_DBG("%s", hdev->name);
3510 
3511 	rcu_read_lock();
3512 
3513 	list_for_each_entry_rcu(conn, &h->list, list) {
3514 		struct hci_chan *chan;
3515 
3516 		if (conn->type != type)
3517 			continue;
3518 
3519 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3520 			continue;
3521 
3522 		num++;
3523 
3524 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3525 			struct sk_buff *skb;
3526 
3527 			if (chan->sent) {
3528 				chan->sent = 0;
3529 				continue;
3530 			}
3531 
3532 			if (skb_queue_empty(&chan->data_q))
3533 				continue;
3534 
3535 			skb = skb_peek(&chan->data_q);
3536 			if (skb->priority >= HCI_PRIO_MAX - 1)
3537 				continue;
3538 
3539 			skb->priority = HCI_PRIO_MAX - 1;
3540 
3541 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3542 			       skb->priority);
3543 		}
3544 
3545 		if (hci_conn_num(hdev, type) == num)
3546 			break;
3547 	}
3548 
3549 	rcu_read_unlock();
3550 
3551 }
3552 
3553 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3554 {
3555 	/* Calculate count of blocks used by this packet */
3556 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3557 }
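
/* Example (assuming a controller block length of 339 octets): an ACL
 * packet whose payload, skb->len - HCI_ACL_HDR_SIZE, is 672 octets uses
 * DIV_ROUND_UP(672, 339) = 2 data blocks.
 */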
3558 
3559 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3560 {
3561 	unsigned long last_tx;
3562 
3563 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3564 		return;
3565 
3566 	switch (type) {
3567 	case LE_LINK:
3568 		last_tx = hdev->le_last_tx;
3569 		break;
3570 	default:
3571 		last_tx = hdev->acl_last_tx;
3572 		break;
3573 	}
3574 
3575 	/* tx timeout must be longer than maximum link supervision timeout
3576 	 * (40.9 seconds)
3577 	 */
3578 	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3579 		hci_link_tx_to(hdev, type);
3580 }
3581 
3582 /* Schedule SCO */
3583 static void hci_sched_sco(struct hci_dev *hdev)
3584 {
3585 	struct hci_conn *conn;
3586 	struct sk_buff *skb;
3587 	int quote;
3588 
3589 	BT_DBG("%s", hdev->name);
3590 
3591 	if (!hci_conn_num(hdev, SCO_LINK))
3592 		return;
3593 
3594 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3595 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3596 			BT_DBG("skb %p len %d", skb, skb->len);
3597 			hci_send_frame(hdev, skb);
3598 
3599 			conn->sent++;
3600 			if (conn->sent == ~0)
3601 				conn->sent = 0;
3602 		}
3603 	}
3604 }
3605 
3606 static void hci_sched_esco(struct hci_dev *hdev)
3607 {
3608 	struct hci_conn *conn;
3609 	struct sk_buff *skb;
3610 	int quote;
3611 
3612 	BT_DBG("%s", hdev->name);
3613 
3614 	if (!hci_conn_num(hdev, ESCO_LINK))
3615 		return;
3616 
3617 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3618 						     &quote))) {
3619 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3620 			BT_DBG("skb %p len %d", skb, skb->len);
3621 			hci_send_frame(hdev, skb);
3622 
3623 			conn->sent++;
3624 			if (conn->sent == ~0)
3625 				conn->sent = 0;
3626 		}
3627 	}
3628 }
3629 
3630 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3631 {
3632 	unsigned int cnt = hdev->acl_cnt;
3633 	struct hci_chan *chan;
3634 	struct sk_buff *skb;
3635 	int quote;
3636 
3637 	__check_timeout(hdev, cnt, ACL_LINK);
3638 
3639 	while (hdev->acl_cnt &&
3640 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3641 		u32 priority = (skb_peek(&chan->data_q))->priority;
3642 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3643 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3644 			       skb->len, skb->priority);
3645 
3646 			/* Stop if priority has changed */
3647 			if (skb->priority < priority)
3648 				break;
3649 
3650 			skb = skb_dequeue(&chan->data_q);
3651 
3652 			hci_conn_enter_active_mode(chan->conn,
3653 						   bt_cb(skb)->force_active);
3654 
3655 			hci_send_frame(hdev, skb);
3656 			hdev->acl_last_tx = jiffies;
3657 
3658 			hdev->acl_cnt--;
3659 			chan->sent++;
3660 			chan->conn->sent++;
3661 
3662 			/* Send pending SCO packets right away */
3663 			hci_sched_sco(hdev);
3664 			hci_sched_esco(hdev);
3665 		}
3666 	}
3667 
3668 	if (cnt != hdev->acl_cnt)
3669 		hci_prio_recalculate(hdev, ACL_LINK);
3670 }
3671 
3672 static void hci_sched_acl_blk(struct hci_dev *hdev)
3673 {
3674 	unsigned int cnt = hdev->block_cnt;
3675 	struct hci_chan *chan;
3676 	struct sk_buff *skb;
3677 	int quote;
3678 	u8 type;
3679 
3680 	BT_DBG("%s", hdev->name);
3681 
3682 	if (hdev->dev_type == HCI_AMP)
3683 		type = AMP_LINK;
3684 	else
3685 		type = ACL_LINK;
3686 
3687 	__check_timeout(hdev, cnt, type);
3688 
3689 	while (hdev->block_cnt > 0 &&
3690 	       (chan = hci_chan_sent(hdev, type, &quote))) {
3691 		u32 priority = (skb_peek(&chan->data_q))->priority;
3692 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3693 			int blocks;
3694 
3695 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3696 			       skb->len, skb->priority);
3697 
3698 			/* Stop if priority has changed */
3699 			if (skb->priority < priority)
3700 				break;
3701 
3702 			skb = skb_dequeue(&chan->data_q);
3703 
3704 			blocks = __get_blocks(hdev, skb);
3705 			if (blocks > hdev->block_cnt)
3706 				return;
3707 
3708 			hci_conn_enter_active_mode(chan->conn,
3709 						   bt_cb(skb)->force_active);
3710 
3711 			hci_send_frame(hdev, skb);
3712 			hdev->acl_last_tx = jiffies;
3713 
3714 			hdev->block_cnt -= blocks;
3715 			quote -= blocks;
3716 
3717 			chan->sent += blocks;
3718 			chan->conn->sent += blocks;
3719 		}
3720 	}
3721 
3722 	if (cnt != hdev->block_cnt)
3723 		hci_prio_recalculate(hdev, type);
3724 }
3725 
3726 static void hci_sched_acl(struct hci_dev *hdev)
3727 {
3728 	BT_DBG("%s", hdev->name);
3729 
3730 	/* No ACL link over BR/EDR controller */
3731 	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3732 		return;
3733 
3734 	/* No AMP link over AMP controller */
3735 	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3736 		return;
3737 
3738 	switch (hdev->flow_ctl_mode) {
3739 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
3740 		hci_sched_acl_pkt(hdev);
3741 		break;
3742 
3743 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3744 		hci_sched_acl_blk(hdev);
3745 		break;
3746 	}
3747 }
3748 
3749 static void hci_sched_le(struct hci_dev *hdev)
3750 {
3751 	struct hci_chan *chan;
3752 	struct sk_buff *skb;
3753 	int quote, cnt, tmp;
3754 
3755 	BT_DBG("%s", hdev->name);
3756 
3757 	if (!hci_conn_num(hdev, LE_LINK))
3758 		return;
3759 
3760 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3761 
3762 	__check_timeout(hdev, cnt, LE_LINK);
3763 
3764 	tmp = cnt;
3765 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3766 		u32 priority = (skb_peek(&chan->data_q))->priority;
3767 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3768 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3769 			       skb->len, skb->priority);
3770 
3771 			/* Stop if priority has changed */
3772 			if (skb->priority < priority)
3773 				break;
3774 
3775 			skb = skb_dequeue(&chan->data_q);
3776 
3777 			hci_send_frame(hdev, skb);
3778 			hdev->le_last_tx = jiffies;
3779 
3780 			cnt--;
3781 			chan->sent++;
3782 			chan->conn->sent++;
3783 
3784 			/* Send pending SCO packets right away */
3785 			hci_sched_sco(hdev);
3786 			hci_sched_esco(hdev);
3787 		}
3788 	}
3789 
3790 	if (hdev->le_pkts)
3791 		hdev->le_cnt = cnt;
3792 	else
3793 		hdev->acl_cnt = cnt;
3794 
3795 	if (cnt != tmp)
3796 		hci_prio_recalculate(hdev, LE_LINK);
3797 }
3798 
3799 /* Schedule CIS */
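/* ISO scheduler: picks the buffer counter in order of preference
 * (dedicated ISO buffers, then LE buffers, then shared ACL buffers)
 * and drains connection queues within the quote returned by
 * hci_low_sent(). The per-connection sent counter wraps back to zero
 * when it reaches its maximum value.
 */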
static void hci_sched_iso(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, *cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ISO_LINK))
		return;

	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
			(*cnt)--;
		}
	}
}

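/* TX work item: runs all per-link schedulers (SCO/eSCO first, then
 * ISO, ACL and LE) unless the device is held by a user channel, and
 * finally flushes any raw packets queued for direct transmission.
 */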
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_iso(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
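/* Strip the ACL header, look up the connection by handle and pass the
 * payload up to L2CAP; frames for unknown handles are logged and
 * dropped.
 */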
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
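/* Strip the SCO header, record the packet status flags in the skb and
 * hand the frame to the SCO layer; errors for unknown handles are rate
 * limited.
 */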
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		hci_skb_pkt_status(skb) = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
				       handle);
	}

	kfree_skb(skb);
}

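/* ISO data packet */
/* Validate and strip the ISO header, look up the connection by handle
 * and pass the payload to the ISO layer; truncated packets and packets
 * for unknown handles are dropped.
 */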
static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_iso_hdr *hdr;
	struct hci_conn *conn;
	__u16 handle, flags;

	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {
		bt_dev_err(hdev, "ISO packet too small");
		goto drop;
	}

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
		   handle, flags);

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (!conn) {
		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
			   handle);
		goto drop;
	}

	/* Send to upper protocol */
	iso_recv(conn, skb, flags);
	return;

drop:
	kfree_skb(skb);
}

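/* A request is complete once the command queue is empty or the next
 * queued command is marked as the start of a new request.
 */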
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

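/* Re-queue a clone of the last sent command (except HCI_OP_RESET) at
 * the head of the command queue and kick the command work.
 */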
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

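/* Called from command complete/status handling to locate the
 * completion callbacks for the request that the completed opcode
 * belongs to. If the command failed, any remaining commands queued for
 * the same request are discarded.
 */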
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		dev_kfree_skb_irq(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

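/* RX work item: drains the receive queue, mirroring every packet to
 * the monitor (and to raw sockets in promiscuous mode) before
 * dispatching it to the event/ACL/SCO/ISO handlers. Packets are
 * dropped while a user channel owns the device, and data packets are
 * dropped while the device is still initializing.
 */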
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions are used to collect packet parsing
	 * coverage from this background thread and associate it with
	 * the thread of the syscall that originally injected the
	 * packet. This helps with fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device.
		 * When the device is in HCI_INIT, we still need to
		 * process the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

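/* Command work item: if the controller has command credits, dequeue
 * the next queued command, keep a clone in hdev->sent_cmd for
 * completion matching, send the frame and (re)arm the command timeout
 * unless a reset or a workqueue drain is in progress.
 */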
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			int res;

			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);

			res = hci_send_frame(hdev, skb);
			if (res < 0)
				__hci_cmd_sync_cancel(hdev, -res);

			rcu_read_lock();
			if (test_bit(HCI_RESET, &hdev->flags) ||
			    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
						   HCI_CMD_TIMEOUT);
			rcu_read_unlock();
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}