/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
36 {
37 	skb_queue_head_init(&req->cmd_q);
38 	req->hdev = hdev;
39 	req->err = 0;
40 }
41 
void hci_req_purge(struct hci_request *req)
43 {
44 	skb_queue_purge(&req->cmd_q);
45 }
46 
bool hci_req_status_pend(struct hci_dev *hdev)
48 {
49 	return hdev->req_status == HCI_REQ_PEND;
50 }
51 
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
54 {
55 	struct hci_dev *hdev = req->hdev;
56 	struct sk_buff *skb;
57 	unsigned long flags;
58 
59 	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
60 
61 	/* If an error occurred during request building, remove all HCI
62 	 * commands queued on the HCI request queue.
63 	 */
64 	if (req->err) {
65 		skb_queue_purge(&req->cmd_q);
66 		return req->err;
67 	}
68 
69 	/* Do not allow empty requests */
70 	if (skb_queue_empty(&req->cmd_q))
71 		return -ENODATA;
72 
73 	skb = skb_peek_tail(&req->cmd_q);
74 	if (complete) {
75 		bt_cb(skb)->hci.req_complete = complete;
76 	} else if (complete_skb) {
77 		bt_cb(skb)->hci.req_complete_skb = complete_skb;
78 		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
79 	}
80 
81 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
82 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
83 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
84 
85 	queue_work(hdev->workqueue, &hdev->cmd_work);
86 
87 	return 0;
88 }
89 
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
91 {
92 	return req_run(req, complete, NULL);
93 }
94 
int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
96 {
97 	return req_run(req, NULL, complete);
98 }
99 
void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
			   struct sk_buff *skb)
102 {
103 	bt_dev_dbg(hdev, "result 0x%2.2x", result);
104 
105 	if (hdev->req_status == HCI_REQ_PEND) {
106 		hdev->req_result = result;
107 		hdev->req_status = HCI_REQ_DONE;
108 		if (skb)
109 			hdev->req_skb = skb_get(skb);
110 		wake_up_interruptible(&hdev->req_wait_q);
111 	}
112 }
113 
114 /* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
118 {
119 	struct hci_request req;
120 	int err = 0;
121 
122 	bt_dev_dbg(hdev, "start");
123 
124 	hci_req_init(&req, hdev);
125 
126 	hdev->req_status = HCI_REQ_PEND;
127 
128 	err = func(&req, opt);
129 	if (err) {
130 		if (hci_status)
131 			*hci_status = HCI_ERROR_UNSPECIFIED;
132 		return err;
133 	}
134 
135 	err = hci_req_run_skb(&req, hci_req_sync_complete);
136 	if (err < 0) {
137 		hdev->req_status = 0;
138 
139 		/* ENODATA means the HCI request command queue is empty.
140 		 * This can happen when a request with conditionals doesn't
141 		 * trigger any commands to be sent. This is normal behavior
142 		 * and should not trigger an error return.
143 		 */
144 		if (err == -ENODATA) {
145 			if (hci_status)
146 				*hci_status = 0;
147 			return 0;
148 		}
149 
150 		if (hci_status)
151 			*hci_status = HCI_ERROR_UNSPECIFIED;
152 
153 		return err;
154 	}
155 
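	/* Wait for the request to complete, be cancelled, or time out */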
156 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
157 			hdev->req_status != HCI_REQ_PEND, timeout);
158 
159 	if (err == -ERESTARTSYS)
160 		return -EINTR;
161 
162 	switch (hdev->req_status) {
163 	case HCI_REQ_DONE:
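		/* Translate the HCI status code into a negative errno value */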
164 		err = -bt_to_errno(hdev->req_result);
165 		if (hci_status)
166 			*hci_status = hdev->req_result;
167 		break;
168 
169 	case HCI_REQ_CANCELED:
170 		err = -hdev->req_result;
171 		if (hci_status)
172 			*hci_status = HCI_ERROR_UNSPECIFIED;
173 		break;
174 
175 	default:
176 		err = -ETIMEDOUT;
177 		if (hci_status)
178 			*hci_status = HCI_ERROR_UNSPECIFIED;
179 		break;
180 	}
181 
182 	kfree_skb(hdev->req_skb);
183 	hdev->req_skb = NULL;
184 	hdev->req_status = hdev->req_result = 0;
185 
186 	bt_dev_dbg(hdev, "end: err %d", err);
187 
188 	return err;
189 }
190 
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
194 {
195 	int ret;
196 
197 	/* Serialize all requests */
198 	hci_req_sync_lock(hdev);
	/* Check the state after obtaining the lock to protect HCI_UP
	 * against any races from hci_dev_do_close() when the controller
	 * gets removed.
	 */
203 	if (test_bit(HCI_UP, &hdev->flags))
204 		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
205 	else
206 		ret = -ENETDOWN;
207 	hci_req_sync_unlock(hdev);
208 
209 	return ret;
210 }
211 
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
214 {
215 	int len = HCI_COMMAND_HDR_SIZE + plen;
216 	struct hci_command_hdr *hdr;
217 	struct sk_buff *skb;
218 
219 	skb = bt_skb_alloc(len, GFP_ATOMIC);
220 	if (!skb)
221 		return NULL;
222 
223 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
224 	hdr->opcode = cpu_to_le16(opcode);
225 	hdr->plen   = plen;
226 
227 	if (plen)
228 		skb_put_data(skb, param, plen);
229 
230 	bt_dev_dbg(hdev, "skb len %d", skb->len);
231 
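	/* Mark this skb as an HCI command and remember its opcode */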
232 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
233 	hci_skb_opcode(skb) = opcode;
234 
235 	return skb;
236 }
237 
238 /* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
241 {
242 	struct hci_dev *hdev = req->hdev;
243 	struct sk_buff *skb;
244 
245 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
246 
247 	/* If an error occurred during request building, there is no point in
248 	 * queueing the HCI command. We can simply return.
249 	 */
250 	if (req->err)
251 		return;
252 
253 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
254 	if (!skb) {
255 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
256 			   opcode);
257 		req->err = -ENOMEM;
258 		return;
259 	}
260 
261 	if (skb_queue_empty(&req->cmd_q))
262 		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
263 
264 	hci_skb_event(skb) = event;
265 
266 	skb_queue_tail(&req->cmd_q, skb);
267 }
268 
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
271 {
272 	hci_req_add_ev(req, opcode, plen, param, 0);
273 }
274 
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
276 {
277 	struct hci_dev *hdev = req->hdev;
278 	struct hci_cp_write_page_scan_activity acp;
279 	u8 type;
280 
281 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
282 		return;
283 
284 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
285 		return;
286 
287 	if (enable) {
288 		type = PAGE_SCAN_TYPE_INTERLACED;
289 
290 		/* 160 msec page scan interval */
291 		acp.interval = cpu_to_le16(0x0100);
292 	} else {
293 		type = hdev->def_page_scan_type;
294 		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
295 	}
296 
297 	acp.window = cpu_to_le16(hdev->def_page_scan_window);
298 
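	/* Only queue the commands if the new parameters differ from the ones
	 * currently programmed in the controller.
	 */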
299 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
300 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
301 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
302 			    sizeof(acp), &acp);
303 
304 	if (hdev->page_scan_type != type)
305 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
306 }
307 
static void start_interleave_scan(struct hci_dev *hdev)
309 {
310 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
311 	queue_delayed_work(hdev->req_workqueue,
312 			   &hdev->interleave_scan, 0);
313 }
314 
static bool is_interleave_scanning(struct hci_dev *hdev)
316 {
317 	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
318 }
319 
static void cancel_interleave_scan(struct hci_dev *hdev)
321 {
322 	bt_dev_dbg(hdev, "cancelling interleave scan");
323 
324 	cancel_delayed_work_sync(&hdev->interleave_scan);
325 
326 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
327 }
328 
/* Return true if an interleave scan was started as a result of this call;
 * otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
333 {
334 	/* Do interleaved scan only if all of the following are true:
335 	 * - There is at least one ADV monitor
336 	 * - At least one pending LE connection or one device to be scanned for
337 	 * - Monitor offloading is not supported
338 	 * If so, we should alternate between allowlist scan and one without
339 	 * any filters to save power.
340 	 */
341 	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
342 				!(list_empty(&hdev->pend_le_conns) &&
343 				  list_empty(&hdev->pend_le_reports)) &&
344 				hci_get_adv_monitor_offload_ext(hdev) ==
345 				    HCI_ADV_MONITOR_EXT_NONE;
346 	bool is_interleaving = is_interleave_scanning(hdev);
347 
348 	if (use_interleaving && !is_interleaving) {
349 		start_interleave_scan(hdev);
350 		bt_dev_dbg(hdev, "starting interleave scan");
351 		return true;
352 	}
353 
354 	if (!use_interleaving && is_interleaving)
355 		cancel_interleave_scan(hdev);
356 
357 	return false;
358 }
359 
void __hci_req_update_name(struct hci_request *req)
361 {
362 	struct hci_dev *hdev = req->hdev;
363 	struct hci_cp_write_local_name cp;
364 
365 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
366 
367 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
368 }
369 
void __hci_req_update_eir(struct hci_request *req)
371 {
372 	struct hci_dev *hdev = req->hdev;
373 	struct hci_cp_write_eir cp;
374 
375 	if (!hdev_is_powered(hdev))
376 		return;
377 
378 	if (!lmp_ext_inq_capable(hdev))
379 		return;
380 
381 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
382 		return;
383 
384 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
385 		return;
386 
387 	memset(&cp, 0, sizeof(cp));
388 
389 	eir_create(hdev, cp.data);
390 
391 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
392 		return;
393 
394 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
395 
396 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
397 }
398 
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
400 {
401 	struct hci_dev *hdev = req->hdev;
402 
403 	if (hdev->scanning_paused) {
404 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
405 		return;
406 	}
407 
408 	if (use_ext_scan(hdev)) {
409 		struct hci_cp_le_set_ext_scan_enable cp;
410 
411 		memset(&cp, 0, sizeof(cp));
412 		cp.enable = LE_SCAN_DISABLE;
413 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
414 			    &cp);
415 	} else {
416 		struct hci_cp_le_set_scan_enable cp;
417 
418 		memset(&cp, 0, sizeof(cp));
419 		cp.enable = LE_SCAN_DISABLE;
420 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
421 	}
422 
423 	/* Disable address resolution */
424 	if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
425 		__u8 enable = 0x00;
426 
427 		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
428 	}
429 }
430 
static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
				 u8 bdaddr_type)
433 {
434 	struct hci_cp_le_del_from_accept_list cp;
435 
436 	cp.bdaddr_type = bdaddr_type;
437 	bacpy(&cp.bdaddr, bdaddr);
438 
439 	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
440 		   cp.bdaddr_type);
441 	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
442 
443 	if (use_ll_privacy(req->hdev)) {
444 		struct smp_irk *irk;
445 
446 		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
447 		if (irk) {
448 			struct hci_cp_le_del_from_resolv_list cp;
449 
450 			cp.bdaddr_type = bdaddr_type;
451 			bacpy(&cp.bdaddr, bdaddr);
452 
453 			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
454 				    sizeof(cp), &cp);
455 		}
456 	}
457 }
458 
459 /* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
			      struct hci_conn_params *params, u8 *num_entries,
			      bool allow_rpa)
463 {
464 	struct hci_cp_le_add_to_accept_list cp;
465 	struct hci_dev *hdev = req->hdev;
466 
467 	/* Already in accept list */
468 	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
469 				   params->addr_type))
470 		return 0;
471 
472 	/* Select filter policy to accept all advertising */
473 	if (*num_entries >= hdev->le_accept_list_size)
474 		return -1;
475 
476 	/* Accept list can not be used with RPAs */
477 	if (!allow_rpa &&
478 	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
479 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
480 		return -1;
481 	}
482 
483 	/* During suspend, only wakeable devices can be in accept list */
484 	if (hdev->suspended &&
485 	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
486 		return 0;
487 
488 	*num_entries += 1;
489 	cp.bdaddr_type = params->addr_type;
490 	bacpy(&cp.bdaddr, &params->addr);
491 
492 	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
493 		   cp.bdaddr_type);
494 	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
495 
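	/* With LL privacy supported, also program the peer's IRK into the
	 * controller's resolving list.
	 */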
496 	if (use_ll_privacy(hdev)) {
497 		struct smp_irk *irk;
498 
499 		irk = hci_find_irk_by_addr(hdev, &params->addr,
500 					   params->addr_type);
501 		if (irk) {
502 			struct hci_cp_le_add_to_resolv_list cp;
503 
504 			cp.bdaddr_type = params->addr_type;
505 			bacpy(&cp.bdaddr, &params->addr);
506 			memcpy(cp.peer_irk, irk->val, 16);
507 
508 			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
509 				memcpy(cp.local_irk, hdev->irk, 16);
510 			else
511 				memset(cp.local_irk, 0, 16);
512 
513 			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
514 				    sizeof(cp), &cp);
515 		}
516 	}
517 
518 	return 0;
519 }
520 
static u8 update_accept_list(struct hci_request *req)
522 {
523 	struct hci_dev *hdev = req->hdev;
524 	struct hci_conn_params *params;
525 	struct bdaddr_list *b;
526 	u8 num_entries = 0;
527 	bool pend_conn, pend_report;
	/* We allow usage of the accept list even with RPAs in suspend. In the
	 * worst case, we won't be able to wake from devices that use the
	 * privacy 1.2 features. Additionally, once we support privacy 1.2 and
	 * IRK offloading, we can update this to also check for those
	 * conditions.
	 */
533 	bool allow_rpa = hdev->suspended;
534 
535 	if (use_ll_privacy(hdev))
536 		allow_rpa = true;
537 
538 	/* Go through the current accept list programmed into the
539 	 * controller one by one and check if that address is still
540 	 * in the list of pending connections or list of devices to
541 	 * report. If not present in either list, then queue the
542 	 * command to remove it from the controller.
543 	 */
544 	list_for_each_entry(b, &hdev->le_accept_list, list) {
545 		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
546 						      &b->bdaddr,
547 						      b->bdaddr_type);
548 		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
549 							&b->bdaddr,
550 							b->bdaddr_type);
551 
552 		/* If the device is not likely to connect or report,
553 		 * remove it from the accept list.
554 		 */
555 		if (!pend_conn && !pend_report) {
556 			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
557 			continue;
558 		}
559 
560 		/* Accept list can not be used with RPAs */
561 		if (!allow_rpa &&
562 		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
563 		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
564 			return 0x00;
565 		}
566 
567 		num_entries++;
568 	}
569 
	/* Since all no-longer-valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of devices is larger than the number of
	 * available accept list entries in the controller, then
	 * just abort and return a filter policy value to not use
	 * the accept list.
	 */
580 	list_for_each_entry(params, &hdev->pend_le_conns, action) {
581 		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
582 			return 0x00;
583 	}
584 
585 	/* After adding all new pending connections, walk through
586 	 * the list of pending reports and also add these to the
587 	 * accept list if there is still space. Abort if space runs out.
588 	 */
589 	list_for_each_entry(params, &hdev->pend_le_reports, action) {
590 		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
591 			return 0x00;
592 	}
593 
594 	/* Use the allowlist unless the following conditions are all true:
595 	 * - We are not currently suspending
596 	 * - There are 1 or more ADV monitors registered and it's not offloaded
597 	 * - Interleaved scanning is not currently using the allowlist
598 	 */
599 	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
600 	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
601 	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
602 		return 0x00;
603 
604 	/* Select filter policy to use accept list */
605 	return 0x01;
606 }
607 
static bool scan_use_rpa(struct hci_dev *hdev)
609 {
610 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
611 }
612 
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool filter_dup, bool addr_resolv)
616 {
617 	struct hci_dev *hdev = req->hdev;
618 
619 	if (hdev->scanning_paused) {
620 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
621 		return;
622 	}
623 
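	/* With LL privacy, enable controller-based address resolution before
	 * scanning starts.
	 */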
624 	if (use_ll_privacy(hdev) && addr_resolv) {
625 		u8 enable = 0x01;
626 
627 		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
628 	}
629 
	/* Use extended scanning if the Set Extended Scan Parameters and
	 * Set Extended Scan Enable commands are supported.
	 */
633 	if (use_ext_scan(hdev)) {
634 		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
635 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
636 		struct hci_cp_le_scan_phy_params *phy_params;
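		/* Buffer sized for the parameter header plus one PHY entry
		 * each for 1M and Coded.
		 */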
637 		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
638 		u32 plen;
639 
640 		ext_param_cp = (void *)data;
641 		phy_params = (void *)ext_param_cp->data;
642 
643 		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
644 		ext_param_cp->own_addr_type = own_addr_type;
645 		ext_param_cp->filter_policy = filter_policy;
646 
647 		plen = sizeof(*ext_param_cp);
648 
649 		if (scan_1m(hdev) || scan_2m(hdev)) {
650 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
651 
652 			memset(phy_params, 0, sizeof(*phy_params));
653 			phy_params->type = type;
654 			phy_params->interval = cpu_to_le16(interval);
655 			phy_params->window = cpu_to_le16(window);
656 
657 			plen += sizeof(*phy_params);
658 			phy_params++;
659 		}
660 
661 		if (scan_coded(hdev)) {
662 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
663 
664 			memset(phy_params, 0, sizeof(*phy_params));
665 			phy_params->type = type;
666 			phy_params->interval = cpu_to_le16(interval);
667 			phy_params->window = cpu_to_le16(window);
668 
669 			plen += sizeof(*phy_params);
670 			phy_params++;
671 		}
672 
673 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
674 			    plen, ext_param_cp);
675 
676 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
677 		ext_enable_cp.enable = LE_SCAN_ENABLE;
678 		ext_enable_cp.filter_dup = filter_dup;
679 
680 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
681 			    sizeof(ext_enable_cp), &ext_enable_cp);
682 	} else {
683 		struct hci_cp_le_set_scan_param param_cp;
684 		struct hci_cp_le_set_scan_enable enable_cp;
685 
686 		memset(&param_cp, 0, sizeof(param_cp));
687 		param_cp.type = type;
688 		param_cp.interval = cpu_to_le16(interval);
689 		param_cp.window = cpu_to_le16(window);
690 		param_cp.own_address_type = own_addr_type;
691 		param_cp.filter_policy = filter_policy;
692 		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
693 			    &param_cp);
694 
695 		memset(&enable_cp, 0, sizeof(enable_cp));
696 		enable_cp.enable = LE_SCAN_ENABLE;
697 		enable_cp.filter_dup = filter_dup;
698 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
699 			    &enable_cp);
700 	}
701 }
702 
/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
705 {
706 	struct hci_conn_hash *h = &hdev->conn_hash;
707 	struct hci_conn  *c;
708 
709 	rcu_read_lock();
710 
711 	list_for_each_entry_rcu(c, &h->list, list) {
712 		if (c->type == LE_LINK && c->state == BT_CONNECT &&
713 		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
714 			rcu_read_unlock();
715 			return true;
716 		}
717 	}
718 
719 	rcu_read_unlock();
720 
721 	return false;
722 }
723 
/* Call hci_req_add_le_scan_disable() first to disable controller-based
 * address resolution before the resolving list can be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
729 {
730 	struct hci_dev *hdev = req->hdev;
731 	u8 own_addr_type;
732 	u8 filter_policy;
733 	u16 window, interval;
734 	/* Default is to enable duplicates filter */
735 	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
736 	/* Background scanning should run with address resolution */
737 	bool addr_resolv = true;
738 
739 	if (hdev->scanning_paused) {
740 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
741 		return;
742 	}
743 
	/* Set require_privacy to false since no SCAN_REQ is sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using directed
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
750 	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
751 				      &own_addr_type))
752 		return;
753 
754 	if (hdev->enable_advmon_interleave_scan &&
755 	    __hci_update_interleaved_scan(hdev))
756 		return;
757 
758 	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
759 	/* Adding or removing entries from the accept list must
760 	 * happen before enabling scanning. The controller does
761 	 * not allow accept list modification while scanning.
762 	 */
763 	filter_policy = update_accept_list(req);
764 
	/* When the controller is using resolvable random addresses, i.e.
	 * LE privacy is enabled, controllers that support Extended
	 * Scanner Filter Policies can also handle directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no accept list)
	 * and 0x01 (accept list enabled), use the new filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
774 	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
775 	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
776 		filter_policy |= 0x02;
777 
778 	if (hdev->suspended) {
779 		window = hdev->le_scan_window_suspend;
780 		interval = hdev->le_scan_int_suspend;
781 	} else if (hci_is_le_conn_scanning(hdev)) {
782 		window = hdev->le_scan_window_connect;
783 		interval = hdev->le_scan_int_connect;
784 	} else if (hci_is_adv_monitoring(hdev)) {
785 		window = hdev->le_scan_window_adv_monitor;
786 		interval = hdev->le_scan_int_adv_monitor;
787 
788 		/* Disable duplicates filter when scanning for advertisement
789 		 * monitor for the following reasons.
790 		 *
791 		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
792 		 * controllers ignore RSSI_Sampling_Period when the duplicates
793 		 * filter is enabled.
794 		 *
795 		 * For SW pattern filtering, when we're not doing interleaved
796 		 * scanning, it is necessary to disable duplicates filter,
797 		 * otherwise hosts can only receive one advertisement and it's
798 		 * impossible to know if a peer is still in range.
799 		 */
800 		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
801 	} else {
802 		window = hdev->le_scan_window;
803 		interval = hdev->le_scan_interval;
804 	}
805 
806 	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
807 		   filter_policy);
808 	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
809 			   own_addr_type, filter_policy, filter_dup,
810 			   addr_resolv);
811 }
812 
static void cancel_adv_timeout(struct hci_dev *hdev)
814 {
815 	if (hdev->adv_instance_timeout) {
816 		hdev->adv_instance_timeout = 0;
817 		cancel_delayed_work(&hdev->adv_instance_expire);
818 	}
819 }
820 
static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
822 {
823 	return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
824 }
825 
void __hci_req_disable_advertising(struct hci_request *req)
827 {
828 	if (ext_adv_capable(req->hdev)) {
829 		__hci_req_disable_ext_adv_instance(req, 0x00);
830 
831 	} else {
832 		u8 enable = 0x00;
833 
834 		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
835 	}
836 }
837 
static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
839 {
840 	/* If privacy is not enabled don't use RPA */
841 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
842 		return false;
843 
844 	/* If basic privacy mode is enabled use RPA */
845 	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
846 		return true;
847 
848 	/* If limited privacy mode is enabled don't use RPA if we're
849 	 * both discoverable and bondable.
850 	 */
851 	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
852 	    hci_dev_test_flag(hdev, HCI_BONDABLE))
853 		return false;
854 
855 	/* We're neither bondable nor discoverable in the limited
856 	 * privacy mode, therefore use RPA.
857 	 */
858 	return true;
859 }
860 
static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
862 {
863 	/* If there is no connection we are OK to advertise. */
864 	if (hci_conn_num(hdev, LE_LINK) == 0)
865 		return true;
866 
867 	/* Check le_states if there is any connection in peripheral role. */
868 	if (hdev->conn_hash.le_num_peripheral > 0) {
		/* Peripheral connection state and non-connectable mode,
		 * bit 20.
		 */
871 		if (!connectable && !(hdev->le_states[2] & 0x10))
872 			return false;
873 
874 		/* Peripheral connection state and connectable mode bit 38
875 		 * and scannable bit 21.
876 		 */
877 		if (connectable && (!(hdev->le_states[4] & 0x40) ||
878 				    !(hdev->le_states[2] & 0x20)))
879 			return false;
880 	}
881 
882 	/* Check le_states if there is any connection in central role. */
883 	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
		/* Central connection state and non-connectable mode, bit 18. */
885 		if (!connectable && !(hdev->le_states[2] & 0x02))
886 			return false;
887 
		/* Central connection state and connectable mode bit 35 and
		 * scannable bit 19.
		 */
891 		if (connectable && (!(hdev->le_states[4] & 0x08) ||
892 				    !(hdev->le_states[2] & 0x08)))
893 			return false;
894 	}
895 
896 	return true;
897 }
898 
void __hci_req_enable_advertising(struct hci_request *req)
900 {
901 	struct hci_dev *hdev = req->hdev;
902 	struct adv_info *adv;
903 	struct hci_cp_le_set_adv_param cp;
904 	u8 own_addr_type, enable = 0x01;
905 	bool connectable;
906 	u16 adv_min_interval, adv_max_interval;
907 	u32 flags;
908 
909 	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
910 	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
911 
912 	/* If the "connectable" instance flag was not set, then choose between
913 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
914 	 */
915 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
916 		      mgmt_get_connectable(hdev);
917 
918 	if (!is_advertising_allowed(hdev, connectable))
919 		return;
920 
921 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
922 		__hci_req_disable_advertising(req);
923 
924 	/* Clear the HCI_LE_ADV bit temporarily so that the
925 	 * hci_update_random_address knows that it's safe to go ahead
926 	 * and write a new random address. The flag will be set back on
927 	 * as soon as the SET_ADV_ENABLE HCI command completes.
928 	 */
929 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
930 
931 	/* Set require_privacy to true only when non-connectable
932 	 * advertising is used. In that case it is fine to use a
933 	 * non-resolvable private address.
934 	 */
935 	if (hci_update_random_address(req, !connectable,
936 				      adv_use_rpa(hdev, flags),
937 				      &own_addr_type) < 0)
938 		return;
939 
940 	memset(&cp, 0, sizeof(cp));
941 
942 	if (adv) {
943 		adv_min_interval = adv->min_interval;
944 		adv_max_interval = adv->max_interval;
945 	} else {
946 		adv_min_interval = hdev->le_adv_min_interval;
947 		adv_max_interval = hdev->le_adv_max_interval;
948 	}
949 
950 	if (connectable) {
951 		cp.type = LE_ADV_IND;
952 	} else {
953 		if (adv_cur_instance_is_scannable(hdev))
954 			cp.type = LE_ADV_SCAN_IND;
955 		else
956 			cp.type = LE_ADV_NONCONN_IND;
957 
958 		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
959 		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
960 			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
961 			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
962 		}
963 	}
964 
965 	cp.min_interval = cpu_to_le16(adv_min_interval);
966 	cp.max_interval = cpu_to_le16(adv_max_interval);
967 	cp.own_address_type = own_addr_type;
968 	cp.channel_map = hdev->le_adv_channel_map;
969 
970 	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
971 
972 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
973 }
974 
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
976 {
977 	struct hci_dev *hdev = req->hdev;
978 	u8 len;
979 
980 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
981 		return;
982 
983 	if (ext_adv_capable(hdev)) {
984 		struct {
985 			struct hci_cp_le_set_ext_scan_rsp_data cp;
986 			u8 data[HCI_MAX_EXT_AD_LENGTH];
987 		} pdu;
988 
989 		memset(&pdu, 0, sizeof(pdu));
990 
991 		len = eir_create_scan_rsp(hdev, instance, pdu.data);
992 
993 		if (hdev->scan_rsp_data_len == len &&
994 		    !memcmp(pdu.data, hdev->scan_rsp_data, len))
995 			return;
996 
997 		memcpy(hdev->scan_rsp_data, pdu.data, len);
998 		hdev->scan_rsp_data_len = len;
999 
1000 		pdu.cp.handle = instance;
1001 		pdu.cp.length = len;
1002 		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1003 		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1004 
1005 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1006 			    sizeof(pdu.cp) + len, &pdu.cp);
1007 	} else {
1008 		struct hci_cp_le_set_scan_rsp_data cp;
1009 
1010 		memset(&cp, 0, sizeof(cp));
1011 
1012 		len = eir_create_scan_rsp(hdev, instance, cp.data);
1013 
1014 		if (hdev->scan_rsp_data_len == len &&
1015 		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1016 			return;
1017 
1018 		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1019 		hdev->scan_rsp_data_len = len;
1020 
1021 		cp.length = len;
1022 
1023 		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1024 	}
1025 }
1026 
void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1028 {
1029 	struct hci_dev *hdev = req->hdev;
1030 	u8 len;
1031 
1032 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1033 		return;
1034 
1035 	if (ext_adv_capable(hdev)) {
1036 		struct {
1037 			struct hci_cp_le_set_ext_adv_data cp;
1038 			u8 data[HCI_MAX_EXT_AD_LENGTH];
1039 		} pdu;
1040 
1041 		memset(&pdu, 0, sizeof(pdu));
1042 
1043 		len = eir_create_adv_data(hdev, instance, pdu.data);
1044 
1045 		/* There's nothing to do if the data hasn't changed */
1046 		if (hdev->adv_data_len == len &&
1047 		    memcmp(pdu.data, hdev->adv_data, len) == 0)
1048 			return;
1049 
1050 		memcpy(hdev->adv_data, pdu.data, len);
1051 		hdev->adv_data_len = len;
1052 
1053 		pdu.cp.length = len;
1054 		pdu.cp.handle = instance;
1055 		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1056 		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1057 
1058 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1059 			    sizeof(pdu.cp) + len, &pdu.cp);
1060 	} else {
1061 		struct hci_cp_le_set_adv_data cp;
1062 
1063 		memset(&cp, 0, sizeof(cp));
1064 
1065 		len = eir_create_adv_data(hdev, instance, cp.data);
1066 
1067 		/* There's nothing to do if the data hasn't changed */
1068 		if (hdev->adv_data_len == len &&
1069 		    memcmp(cp.data, hdev->adv_data, len) == 0)
1070 			return;
1071 
1072 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1073 		hdev->adv_data_len = len;
1074 
1075 		cp.length = len;
1076 
1077 		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1078 	}
1079 }
1080 
int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1082 {
1083 	struct hci_request req;
1084 
1085 	hci_req_init(&req, hdev);
1086 	__hci_req_update_adv_data(&req, instance);
1087 
1088 	return hci_req_run(&req, NULL);
1089 }
1090 
static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
					    u16 opcode)
1093 {
1094 	BT_DBG("%s status %u", hdev->name, status);
1095 }
1096 
void hci_req_disable_address_resolution(struct hci_dev *hdev)
1098 {
1099 	struct hci_request req;
1100 	__u8 enable = 0x00;
1101 
1102 	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1103 		return;
1104 
1105 	hci_req_init(&req, hdev);
1106 
1107 	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1108 
1109 	hci_req_run(&req, enable_addr_resolution_complete);
1110 }
1111 
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1113 {
1114 	bt_dev_dbg(hdev, "status %u", status);
1115 }
1116 
void hci_req_reenable_advertising(struct hci_dev *hdev)
1118 {
1119 	struct hci_request req;
1120 
1121 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1122 	    list_empty(&hdev->adv_instances))
1123 		return;
1124 
1125 	hci_req_init(&req, hdev);
1126 
1127 	if (hdev->cur_adv_instance) {
1128 		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1129 						true);
1130 	} else {
1131 		if (ext_adv_capable(hdev)) {
1132 			__hci_req_start_ext_adv(&req, 0x00);
1133 		} else {
1134 			__hci_req_update_adv_data(&req, 0x00);
1135 			__hci_req_update_scan_rsp_data(&req, 0x00);
1136 			__hci_req_enable_advertising(&req);
1137 		}
1138 	}
1139 
1140 	hci_req_run(&req, adv_enable_complete);
1141 }
1142 
static void adv_timeout_expire(struct work_struct *work)
1144 {
1145 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1146 					    adv_instance_expire.work);
1147 
1148 	struct hci_request req;
1149 	u8 instance;
1150 
1151 	bt_dev_dbg(hdev, "");
1152 
1153 	hci_dev_lock(hdev);
1154 
1155 	hdev->adv_instance_timeout = 0;
1156 
1157 	instance = hdev->cur_adv_instance;
1158 	if (instance == 0x00)
1159 		goto unlock;
1160 
1161 	hci_req_init(&req, hdev);
1162 
1163 	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1164 
1165 	if (list_empty(&hdev->adv_instances))
1166 		__hci_req_disable_advertising(&req);
1167 
1168 	hci_req_run(&req, NULL);
1169 
1170 unlock:
1171 	hci_dev_unlock(hdev);
1172 }
1173 
static int hci_req_add_le_interleaved_scan(struct hci_request *req,
					   unsigned long opt)
1176 {
1177 	struct hci_dev *hdev = req->hdev;
1178 	int ret = 0;
1179 
1180 	hci_dev_lock(hdev);
1181 
1182 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1183 		hci_req_add_le_scan_disable(req, false);
1184 	hci_req_add_le_passive_scan(req);
1185 
1186 	switch (hdev->interleave_scan_state) {
1187 	case INTERLEAVE_SCAN_ALLOWLIST:
1188 		bt_dev_dbg(hdev, "next state: allowlist");
1189 		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1190 		break;
1191 	case INTERLEAVE_SCAN_NO_FILTER:
1192 		bt_dev_dbg(hdev, "next state: no filter");
1193 		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1194 		break;
1195 	case INTERLEAVE_SCAN_NONE:
1196 		BT_ERR("unexpected error");
1197 		ret = -1;
1198 	}
1199 
1200 	hci_dev_unlock(hdev);
1201 
1202 	return ret;
1203 }
1204 
static void interleave_scan_work(struct work_struct *work)
1206 {
1207 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1208 					    interleave_scan.work);
1209 	u8 status;
1210 	unsigned long timeout;
1211 
1212 	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
1213 		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
1214 	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
1215 		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
1216 	} else {
1217 		bt_dev_err(hdev, "unexpected error");
1218 		return;
1219 	}
1220 
1221 	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
1222 		     HCI_CMD_TIMEOUT, &status);
1223 
1224 	/* Don't continue interleaving if it was canceled */
1225 	if (is_interleave_scanning(hdev))
1226 		queue_delayed_work(hdev->req_workqueue,
1227 				   &hdev->interleave_scan, timeout);
1228 }
1229 
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
1233 {
1234 	int err;
1235 
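	/* Default to an unset address; it is only filled in below when a
	 * random address is actually required.
	 */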
1236 	bacpy(rand_addr, BDADDR_ANY);
1237 
	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired, generate a new one.
	 */
1241 	if (use_rpa) {
		/* If the controller supports LL Privacy, use own address
		 * type 0x03.
		 */
1245 		if (use_ll_privacy(hdev))
1246 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1247 		else
1248 			*own_addr_type = ADDR_LE_DEV_RANDOM;
1249 
1250 		if (adv_instance) {
1251 			if (adv_rpa_valid(adv_instance))
1252 				return 0;
1253 		} else {
1254 			if (rpa_valid(hdev))
1255 				return 0;
1256 		}
1257 
1258 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1259 		if (err < 0) {
1260 			bt_dev_err(hdev, "failed to generate new RPA");
1261 			return err;
1262 		}
1263 
1264 		bacpy(rand_addr, &hdev->rpa);
1265 
1266 		return 0;
1267 	}
1268 
	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
1273 	if (require_privacy) {
1274 		bdaddr_t nrpa;
1275 
1276 		while (true) {
1277 			/* The non-resolvable private address is generated
1278 			 * from random six bytes with the two most significant
1279 			 * bits cleared.
1280 			 */
1281 			get_random_bytes(&nrpa, 6);
1282 			nrpa.b[5] &= 0x3f;
1283 
1284 			/* The non-resolvable private address shall not be
1285 			 * equal to the public address.
1286 			 */
1287 			if (bacmp(&hdev->bdaddr, &nrpa))
1288 				break;
1289 		}
1290 
1291 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1292 		bacpy(rand_addr, &nrpa);
1293 
1294 		return 0;
1295 	}
1296 
1297 	/* No privacy so use a public address. */
1298 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1299 
1300 	return 0;
1301 }
1302 
void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1304 {
1305 	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1306 }
1307 
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1309 {
1310 	struct hci_dev *hdev = req->hdev;
1311 
1312 	/* If we're advertising or initiating an LE connection we can't
1313 	 * go ahead and change the random address at this time. This is
1314 	 * because the eventual initiator address used for the
1315 	 * subsequently created connection will be undefined (some
1316 	 * controllers use the new address and others the one we had
1317 	 * when the operation started).
1318 	 *
1319 	 * In this kind of scenario skip the update and let the random
1320 	 * address be updated at the next cycle.
1321 	 */
1322 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1323 	    hci_lookup_le_connect(hdev)) {
1324 		bt_dev_dbg(hdev, "Deferring random address update");
1325 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1326 		return;
1327 	}
1328 
1329 	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1330 }
1331 
int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1333 {
1334 	struct hci_cp_le_set_ext_adv_params cp;
1335 	struct hci_dev *hdev = req->hdev;
1336 	bool connectable;
1337 	u32 flags;
1338 	bdaddr_t random_addr;
1339 	u8 own_addr_type;
1340 	int err;
1341 	struct adv_info *adv_instance;
1342 	bool secondary_adv;
1343 
1344 	if (instance > 0) {
1345 		adv_instance = hci_find_adv_instance(hdev, instance);
1346 		if (!adv_instance)
1347 			return -EINVAL;
1348 	} else {
1349 		adv_instance = NULL;
1350 	}
1351 
1352 	flags = hci_adv_instance_flags(hdev, instance);
1353 
1354 	/* If the "connectable" instance flag was not set, then choose between
1355 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1356 	 */
1357 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1358 		      mgmt_get_connectable(hdev);
1359 
1360 	if (!is_advertising_allowed(hdev, connectable))
1361 		return -EPERM;
1362 
1363 	/* Set require_privacy to true only when non-connectable
1364 	 * advertising is used. In that case it is fine to use a
1365 	 * non-resolvable private address.
1366 	 */
1367 	err = hci_get_random_address(hdev, !connectable,
1368 				     adv_use_rpa(hdev, flags), adv_instance,
1369 				     &own_addr_type, &random_addr);
1370 	if (err < 0)
1371 		return err;
1372 
1373 	memset(&cp, 0, sizeof(cp));
1374 
1375 	if (adv_instance) {
1376 		hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
1377 		hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
1378 		cp.tx_power = adv_instance->tx_power;
1379 	} else {
1380 		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1381 		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1382 		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1383 	}
1384 
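	/* Any secondary-channel flag selects extended (non-legacy)
	 * advertising PDUs.
	 */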
1385 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1386 
1387 	if (connectable) {
1388 		if (secondary_adv)
1389 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1390 		else
1391 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1392 	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
1393 		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
1394 		if (secondary_adv)
1395 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1396 		else
1397 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1398 	} else {
1399 		if (secondary_adv)
1400 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1401 		else
1402 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1403 	}
1404 
1405 	cp.own_addr_type = own_addr_type;
1406 	cp.channel_map = hdev->le_adv_channel_map;
1407 	cp.handle = instance;
1408 
1409 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
1410 		cp.primary_phy = HCI_ADV_PHY_1M;
1411 		cp.secondary_phy = HCI_ADV_PHY_2M;
1412 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1413 		cp.primary_phy = HCI_ADV_PHY_CODED;
1414 		cp.secondary_phy = HCI_ADV_PHY_CODED;
1415 	} else {
1416 		/* In all other cases use 1M */
1417 		cp.primary_phy = HCI_ADV_PHY_1M;
1418 		cp.secondary_phy = HCI_ADV_PHY_1M;
1419 	}
1420 
1421 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1422 
1423 	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
1424 	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
1425 	    bacmp(&random_addr, BDADDR_ANY)) {
1426 		struct hci_cp_le_set_adv_set_rand_addr cp;
1427 
1428 		/* Check if random address need to be updated */
1429 		if (adv_instance) {
1430 			if (!bacmp(&random_addr, &adv_instance->random_addr))
1431 				return 0;
1432 		} else {
1433 			if (!bacmp(&random_addr, &hdev->random_addr))
1434 				return 0;
			/* Instance 0x00 doesn't have an adv_info; instead it
			 * uses hdev->random_addr to track its address, so
			 * whenever it needs updating this also sets the
			 * random address, since hdev->random_addr is shared
			 * with the scan state machine.
			 */
1441 			set_random_addr(req, &random_addr);
1442 		}
1443 
1444 		memset(&cp, 0, sizeof(cp));
1445 
1446 		cp.handle = instance;
1447 		bacpy(&cp.bdaddr, &random_addr);
1448 
1449 		hci_req_add(req,
1450 			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1451 			    sizeof(cp), &cp);
1452 	}
1453 
1454 	return 0;
1455 }
1456 
int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1458 {
1459 	struct hci_dev *hdev = req->hdev;
1460 	struct hci_cp_le_set_ext_adv_enable *cp;
1461 	struct hci_cp_ext_adv_set *adv_set;
1462 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1463 	struct adv_info *adv_instance;
1464 
1465 	if (instance > 0) {
1466 		adv_instance = hci_find_adv_instance(hdev, instance);
1467 		if (!adv_instance)
1468 			return -EINVAL;
1469 	} else {
1470 		adv_instance = NULL;
1471 	}
1472 
1473 	cp = (void *) data;
1474 	adv_set = (void *) cp->data;
1475 
1476 	memset(cp, 0, sizeof(*cp));
1477 
1478 	cp->enable = 0x01;
1479 	cp->num_of_sets = 0x01;
1480 
1481 	memset(adv_set, 0, sizeof(*adv_set));
1482 
1483 	adv_set->handle = instance;
1484 
1485 	/* Set duration per instance since controller is responsible for
1486 	 * scheduling it.
1487 	 */
1488 	if (adv_instance && adv_instance->duration) {
1489 		u16 duration = adv_instance->timeout * MSEC_PER_SEC;
1490 
1491 		/* Time = N * 10 ms */
1492 		adv_set->duration = cpu_to_le16(duration / 10);
1493 	}
1494 
1495 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1496 		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1497 		    data);
1498 
1499 	return 0;
1500 }
1501 
int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
1503 {
1504 	struct hci_dev *hdev = req->hdev;
1505 	struct hci_cp_le_set_ext_adv_enable *cp;
1506 	struct hci_cp_ext_adv_set *adv_set;
1507 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1508 	u8 req_size;
1509 
1510 	/* If request specifies an instance that doesn't exist, fail */
1511 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1512 		return -EINVAL;
1513 
1514 	memset(data, 0, sizeof(data));
1515 
1516 	cp = (void *)data;
1517 	adv_set = (void *)cp->data;
1518 
1519 	/* Instance 0x00 indicates all advertising instances will be disabled */
1520 	cp->num_of_sets = !!instance;
1521 	cp->enable = 0x00;
1522 
1523 	adv_set->handle = instance;
1524 
1525 	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
1526 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
1527 
1528 	return 0;
1529 }
1530 
int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
1532 {
1533 	struct hci_dev *hdev = req->hdev;
1534 
1535 	/* If request specifies an instance that doesn't exist, fail */
1536 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1537 		return -EINVAL;
1538 
1539 	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
1540 
1541 	return 0;
1542 }
1543 
int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1545 {
1546 	struct hci_dev *hdev = req->hdev;
1547 	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
1548 	int err;
1549 
	/* If the instance isn't pending, the controller already knows about
	 * it and it is safe to disable it.
	 */
1553 	if (adv_instance && !adv_instance->pending)
1554 		__hci_req_disable_ext_adv_instance(req, instance);
1555 
1556 	err = __hci_req_setup_ext_adv_instance(req, instance);
1557 	if (err < 0)
1558 		return err;
1559 
1560 	__hci_req_update_scan_rsp_data(req, instance);
1561 	__hci_req_enable_ext_advertising(req, instance);
1562 
1563 	return 0;
1564 }
1565 
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
1568 {
1569 	struct hci_dev *hdev = req->hdev;
1570 	struct adv_info *adv_instance = NULL;
1571 	u16 timeout;
1572 
1573 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1574 	    list_empty(&hdev->adv_instances))
1575 		return -EPERM;
1576 
1577 	if (hdev->adv_instance_timeout)
1578 		return -EBUSY;
1579 
1580 	adv_instance = hci_find_adv_instance(hdev, instance);
1581 	if (!adv_instance)
1582 		return -ENOENT;
1583 
1584 	/* A zero timeout means unlimited advertising. As long as there is
1585 	 * only one instance, duration should be ignored. We still set a timeout
1586 	 * in case further instances are being added later on.
1587 	 *
1588 	 * If the remaining lifetime of the instance is more than the duration
1589 	 * then the timeout corresponds to the duration, otherwise it will be
1590 	 * reduced to the remaining instance lifetime.
1591 	 */
1592 	if (adv_instance->timeout == 0 ||
1593 	    adv_instance->duration <= adv_instance->remaining_time)
1594 		timeout = adv_instance->duration;
1595 	else
1596 		timeout = adv_instance->remaining_time;
1597 
1598 	/* The remaining time is being reduced unless the instance is being
1599 	 * advertised without time limit.
1600 	 */
1601 	if (adv_instance->timeout)
1602 		adv_instance->remaining_time =
1603 				adv_instance->remaining_time - timeout;
1604 
1605 	/* Only use work for scheduling instances with legacy advertising */
1606 	if (!ext_adv_capable(hdev)) {
1607 		hdev->adv_instance_timeout = timeout;
1608 		queue_delayed_work(hdev->req_workqueue,
1609 			   &hdev->adv_instance_expire,
1610 			   msecs_to_jiffies(timeout * 1000));
1611 	}
1612 
1613 	/* If we're just re-scheduling the same instance again then do not
1614 	 * execute any HCI commands. This happens when a single instance is
1615 	 * being advertised.
1616 	 */
1617 	if (!force && hdev->cur_adv_instance == instance &&
1618 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
1619 		return 0;
1620 
1621 	hdev->cur_adv_instance = instance;
1622 	if (ext_adv_capable(hdev)) {
1623 		__hci_req_start_ext_adv(req, instance);
1624 	} else {
1625 		__hci_req_update_adv_data(req, instance);
1626 		__hci_req_update_scan_rsp_data(req, instance);
1627 		__hci_req_enable_advertising(req);
1628 	}
1629 
1630 	return 0;
1631 }
1632 
1633 /* For a single instance:
1634  * - force == true: The instance will be removed even when its remaining
1635  *   lifetime is not zero.
1636  * - force == false: the instance will be deactivated but kept stored unless
1637  *   the remaining lifetime is zero.
1638  *
1639  * For instance == 0x00:
1640  * - force == true: All instances will be removed regardless of their timeout
1641  *   setting.
1642  * - force == false: Only instances that have a timeout will be removed.
1643  */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
1647 {
1648 	struct adv_info *adv_instance, *n, *next_instance = NULL;
1649 	int err;
1650 	u8 rem_inst;
1651 
1652 	/* Cancel any timeout concerning the removed instance(s). */
1653 	if (!instance || hdev->cur_adv_instance == instance)
1654 		cancel_adv_timeout(hdev);
1655 
1656 	/* Get the next instance to advertise BEFORE we remove
1657 	 * the current one. This can be the same instance again
1658 	 * if there is only one instance.
1659 	 */
1660 	if (instance && hdev->cur_adv_instance == instance)
1661 		next_instance = hci_get_next_instance(hdev, instance);
1662 
1663 	if (instance == 0x00) {
1664 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1665 					 list) {
1666 			if (!(force || adv_instance->timeout))
1667 				continue;
1668 
1669 			rem_inst = adv_instance->instance;
1670 			err = hci_remove_adv_instance(hdev, rem_inst);
1671 			if (!err)
1672 				mgmt_advertising_removed(sk, hdev, rem_inst);
1673 		}
1674 	} else {
1675 		adv_instance = hci_find_adv_instance(hdev, instance);
1676 
1677 		if (force || (adv_instance && adv_instance->timeout &&
1678 			      !adv_instance->remaining_time)) {
1679 			/* Don't advertise a removed instance. */
1680 			if (next_instance &&
1681 			    next_instance->instance == instance)
1682 				next_instance = NULL;
1683 
1684 			err = hci_remove_adv_instance(hdev, instance);
1685 			if (!err)
1686 				mgmt_advertising_removed(sk, hdev, instance);
1687 		}
1688 	}
1689 
1690 	if (!req || !hdev_is_powered(hdev) ||
1691 	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
1692 		return;
1693 
1694 	if (next_instance && !ext_adv_capable(hdev))
1695 		__hci_req_schedule_adv_instance(req, next_instance->instance,
1696 						false);
1697 }
1698 
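/* Pick the own address type for an upcoming request and, when a random
 * address (RPA, NRPA or static address) is needed, queue the command to
 * program it. Falls back to the public address when neither privacy nor
 * a static address is in use.
 */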
hci_update_random_address(struct hci_request * req,bool require_privacy,bool use_rpa,u8 * own_addr_type)1699 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1700 			      bool use_rpa, u8 *own_addr_type)
1701 {
1702 	struct hci_dev *hdev = req->hdev;
1703 	int err;
1704 
1705 	/* If privacy is enabled use a resolvable private address. If
1706 	 * the current RPA has expired or something other than the
1707 	 * current RPA is in use, then generate a new one.
1708 	 */
1709 	if (use_rpa) {
1710 		/* If the controller supports LL Privacy, use own address
1711 		 * type 0x03 (resolvable private address).
1712 		 */
1713 		if (use_ll_privacy(hdev))
1714 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1715 		else
1716 			*own_addr_type = ADDR_LE_DEV_RANDOM;
1717 
1718 		if (rpa_valid(hdev))
1719 			return 0;
1720 
1721 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1722 		if (err < 0) {
1723 			bt_dev_err(hdev, "failed to generate new RPA");
1724 			return err;
1725 		}
1726 
1727 		set_random_addr(req, &hdev->rpa);
1728 
1729 		return 0;
1730 	}
1731 
1732 	/* In case of required privacy without resolvable private address,
1733 	 * use a non-resolvable private address. This is useful for active
1734 	 * scanning and non-connectable advertising.
1735 	 */
1736 	if (require_privacy) {
1737 		bdaddr_t nrpa;
1738 
1739 		while (true) {
1740 			/* The non-resolvable private address is generated
1741 			 * from six random bytes with the two most significant
1742 			 * bits cleared.
1743 			 */
1744 			get_random_bytes(&nrpa, 6);
1745 			nrpa.b[5] &= 0x3f;
1746 
1747 			/* The non-resolvable private address shall not be
1748 			 * equal to the public address.
1749 			 */
1750 			if (bacmp(&hdev->bdaddr, &nrpa))
1751 				break;
1752 		}
1753 
1754 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1755 		set_random_addr(req, &nrpa);
1756 		return 0;
1757 	}
1758 
1759 	/* If forcing the static address is in use or there is no public
1760 	 * address, use the static address as the random address (but skip
1761 	 * the HCI command if the current random address is already the
1762 	 * static one).
1763 	 *
1764 	 * In case BR/EDR has been disabled on a dual-mode controller
1765 	 * and a static address has been configured, then use that
1766 	 * address instead of the public BR/EDR address.
1767 	 */
1768 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1769 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1770 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1771 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
1772 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1773 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
1774 			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1775 				    &hdev->static_addr);
1776 		return 0;
1777 	}
1778 
1779 	/* Neither privacy nor static address is being used so use a
1780 	 * public address.
1781 	 */
1782 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1783 
1784 	return 0;
1785 }
1786 
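/* Return true if any device on the accept list has no ACL connection in
 * the connected or configuration state, in which case page scanning
 * needs to stay enabled so that device can reconnect.
 */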
disconnected_accept_list_entries(struct hci_dev * hdev)1787 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
1788 {
1789 	struct bdaddr_list *b;
1790 
1791 	list_for_each_entry(b, &hdev->accept_list, list) {
1792 		struct hci_conn *conn;
1793 
1794 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1795 		if (!conn)
1796 			return true;
1797 
1798 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1799 			return true;
1800 	}
1801 
1802 	return false;
1803 }
1804 
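/* Recompute the BR/EDR page and inquiry scan setting from the
 * connectable/discoverable state and queue a Write Scan Enable command
 * if it differs from what the controller currently has.
 */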
__hci_req_update_scan(struct hci_request * req)1805 void __hci_req_update_scan(struct hci_request *req)
1806 {
1807 	struct hci_dev *hdev = req->hdev;
1808 	u8 scan;
1809 
1810 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1811 		return;
1812 
1813 	if (!hdev_is_powered(hdev))
1814 		return;
1815 
1816 	if (mgmt_powering_down(hdev))
1817 		return;
1818 
1819 	if (hdev->scanning_paused)
1820 		return;
1821 
1822 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1823 	    disconnected_accept_list_entries(hdev))
1824 		scan = SCAN_PAGE;
1825 	else
1826 		scan = SCAN_DISABLED;
1827 
1828 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1829 		scan |= SCAN_INQUIRY;
1830 
1831 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1832 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1833 		return;
1834 
1835 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1836 }
1837 
update_scan(struct hci_request * req,unsigned long opt)1838 static int update_scan(struct hci_request *req, unsigned long opt)
1839 {
1840 	hci_dev_lock(req->hdev);
1841 	__hci_req_update_scan(req);
1842 	hci_dev_unlock(req->hdev);
1843 	return 0;
1844 }
1845 
scan_update_work(struct work_struct * work)1846 static void scan_update_work(struct work_struct *work)
1847 {
1848 	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1849 
1850 	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1851 }
1852 
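/* OR together the service class hints of all registered UUIDs to form
 * the service class field of the Class of Device.
 */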
get_service_classes(struct hci_dev * hdev)1853 static u8 get_service_classes(struct hci_dev *hdev)
1854 {
1855 	struct bt_uuid *uuid;
1856 	u8 val = 0;
1857 
1858 	list_for_each_entry(uuid, &hdev->uuids, list)
1859 		val |= uuid->svc_hint;
1860 
1861 	return val;
1862 }
1863 
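/* Rebuild the Class of Device value from the stored major/minor class
 * and the registered service UUIDs and queue a Write Class of Device
 * command if the value has changed.
 */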
__hci_req_update_class(struct hci_request * req)1864 void __hci_req_update_class(struct hci_request *req)
1865 {
1866 	struct hci_dev *hdev = req->hdev;
1867 	u8 cod[3];
1868 
1869 	bt_dev_dbg(hdev, "");
1870 
1871 	if (!hdev_is_powered(hdev))
1872 		return;
1873 
1874 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1875 		return;
1876 
1877 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1878 		return;
1879 
1880 	cod[0] = hdev->minor_class;
1881 	cod[1] = hdev->major_class;
1882 	cod[2] = get_service_classes(hdev);
1883 
1884 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1885 		cod[1] |= 0x20;
1886 
1887 	if (memcmp(cod, hdev->dev_class, 3) == 0)
1888 		return;
1889 
1890 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1891 }
1892 
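/* Queue a Write Current IAC LAP command matching the general or limited
 * discoverable mode.
 */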
write_iac(struct hci_request * req)1893 static void write_iac(struct hci_request *req)
1894 {
1895 	struct hci_dev *hdev = req->hdev;
1896 	struct hci_cp_write_current_iac_lap cp;
1897 
1898 	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1899 		return;
1900 
1901 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1902 		/* Limited discoverable mode */
1903 		cp.num_iac = min_t(u8, hdev->num_iac, 2);
1904 		cp.iac_lap[0] = 0x00;	/* LIAC */
1905 		cp.iac_lap[1] = 0x8b;
1906 		cp.iac_lap[2] = 0x9e;
1907 		cp.iac_lap[3] = 0x33;	/* GIAC */
1908 		cp.iac_lap[4] = 0x8b;
1909 		cp.iac_lap[5] = 0x9e;
1910 	} else {
1911 		/* General discoverable mode */
1912 		cp.num_iac = 1;
1913 		cp.iac_lap[0] = 0x33;	/* GIAC */
1914 		cp.iac_lap[1] = 0x8b;
1915 		cp.iac_lap[2] = 0x9e;
1916 	}
1917 
1918 	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1919 		    (cp.num_iac * 3) + 1, &cp);
1920 }
1921 
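/* Synchronize the controller with a changed discoverable setting: IAC,
 * scan mode and class of device for BR/EDR, and the advertising data
 * (plus advertising itself in limited privacy mode) when advertising was
 * enabled with Set Advertising.
 */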
discoverable_update(struct hci_request * req,unsigned long opt)1922 static int discoverable_update(struct hci_request *req, unsigned long opt)
1923 {
1924 	struct hci_dev *hdev = req->hdev;
1925 
1926 	hci_dev_lock(hdev);
1927 
1928 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1929 		write_iac(req);
1930 		__hci_req_update_scan(req);
1931 		__hci_req_update_class(req);
1932 	}
1933 
1934 	/* Advertising instances don't use the global discoverable setting, so
1935 	 * only update AD if advertising was enabled using Set Advertising.
1936 	 */
1937 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1938 		__hci_req_update_adv_data(req, 0x00);
1939 
1940 		/* Discoverable mode affects the local advertising
1941 		 * address in limited privacy mode.
1942 		 */
1943 		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
1944 			if (ext_adv_capable(hdev))
1945 				__hci_req_start_ext_adv(req, 0x00);
1946 			else
1947 				__hci_req_enable_advertising(req);
1948 		}
1949 	}
1950 
1951 	hci_dev_unlock(hdev);
1952 
1953 	return 0;
1954 }
1955 
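/* Queue the HCI command that is appropriate for aborting a connection in
 * its current state: disconnect an established link, cancel a pending
 * connection attempt or reject an incoming request.
 */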
__hci_abort_conn(struct hci_request * req,struct hci_conn * conn,u8 reason)1956 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1957 		      u8 reason)
1958 {
1959 	switch (conn->state) {
1960 	case BT_CONNECTED:
1961 	case BT_CONFIG:
1962 		if (conn->type == AMP_LINK) {
1963 			struct hci_cp_disconn_phy_link cp;
1964 
1965 			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1966 			cp.reason = reason;
1967 			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1968 				    &cp);
1969 		} else {
1970 			struct hci_cp_disconnect dc;
1971 
1972 			dc.handle = cpu_to_le16(conn->handle);
1973 			dc.reason = reason;
1974 			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1975 		}
1976 
1977 		conn->state = BT_DISCONN;
1978 
1979 		break;
1980 	case BT_CONNECT:
1981 		if (conn->type == LE_LINK) {
1982 			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1983 				break;
1984 			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1985 				    0, NULL);
1986 		} else if (conn->type == ACL_LINK) {
1987 			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1988 				break;
1989 			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1990 				    6, &conn->dst);
1991 		}
1992 		break;
1993 	case BT_CONNECT2:
1994 		if (conn->type == ACL_LINK) {
1995 			struct hci_cp_reject_conn_req rej;
1996 
1997 			bacpy(&rej.bdaddr, &conn->dst);
1998 			rej.reason = reason;
1999 
2000 			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2001 				    sizeof(rej), &rej);
2002 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2003 			struct hci_cp_reject_sync_conn_req rej;
2004 
2005 			bacpy(&rej.bdaddr, &conn->dst);
2006 
2007 			/* SCO rejection has its own limited set of
2008 			 * allowed error values (0x0D-0x0F) which isn't
2009 			 * compatible with most values passed to this
2010 			 * function. To be safe, hard-code one of the
2011 			 * values that's suitable for SCO.
2012 			 */
2013 			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2014 
2015 			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2016 				    sizeof(rej), &rej);
2017 		}
2018 		break;
2019 	default:
2020 		conn->state = BT_CLOSED;
2021 		break;
2022 	}
2023 }
2024 
abort_conn_complete(struct hci_dev * hdev,u8 status,u16 opcode)2025 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2026 {
2027 	if (status)
2028 		bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2029 }
2030 
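/* Build and run a request that aborts the given connection. An empty
 * request (-ENODATA) is not treated as an error.
 */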
hci_abort_conn(struct hci_conn * conn,u8 reason)2031 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2032 {
2033 	struct hci_request req;
2034 	int err;
2035 
2036 	hci_req_init(&req, conn->hdev);
2037 
2038 	__hci_abort_conn(&req, conn, reason);
2039 
2040 	err = hci_req_run(&req, abort_conn_complete);
2041 	if (err && err != -ENODATA) {
2042 		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2043 		return err;
2044 	}
2045 
2046 	return 0;
2047 }
2048 
le_scan_disable(struct hci_request * req,unsigned long opt)2049 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2050 {
2051 	hci_req_add_le_scan_disable(req, false);
2052 	return 0;
2053 }
2054 
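/* Flush the inquiry cache and start a BR/EDR inquiry of the requested
 * length using either the general or the limited IAC.
 */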
bredr_inquiry(struct hci_request * req,unsigned long opt)2055 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2056 {
2057 	u8 length = opt;
2058 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2059 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2060 	struct hci_cp_inquiry cp;
2061 
2062 	if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2063 		return 0;
2064 
2065 	bt_dev_dbg(req->hdev, "");
2066 
2067 	hci_dev_lock(req->hdev);
2068 	hci_inquiry_cache_flush(req->hdev);
2069 	hci_dev_unlock(req->hdev);
2070 
2071 	memset(&cp, 0, sizeof(cp));
2072 
2073 	if (req->hdev->discovery.limited)
2074 		memcpy(&cp.lap, liac, sizeof(cp.lap));
2075 	else
2076 		memcpy(&cp.lap, giac, sizeof(cp.lap));
2077 
2078 	cp.length = length;
2079 
2080 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2081 
2082 	return 0;
2083 }
2084 
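/* Delayed work that disables LE scanning once the discovery scan
 * duration has elapsed and, for interleaved discovery, either hands over
 * to BR/EDR inquiry or stops discovery.
 */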
le_scan_disable_work(struct work_struct * work)2085 static void le_scan_disable_work(struct work_struct *work)
2086 {
2087 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2088 					    le_scan_disable.work);
2089 	u8 status;
2090 
2091 	bt_dev_dbg(hdev, "");
2092 
2093 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2094 		return;
2095 
2096 	cancel_delayed_work(&hdev->le_scan_restart);
2097 
2098 	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2099 	if (status) {
2100 		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2101 			   status);
2102 		return;
2103 	}
2104 
2105 	hdev->discovery.scan_start = 0;
2106 
2107 	/* If we were running an LE-only scan, change the discovery state.
2108 	 * If we were running both LE and BR/EDR inquiry simultaneously,
2109 	 * and BR/EDR inquiry has already finished, stop discovery;
2110 	 * otherwise BR/EDR inquiry will stop discovery when it finishes.
2111 	 * If we are going to resolve a remote device name, do not change
2112 	 * the discovery state.
2113 	 */
2114 
2115 	if (hdev->discovery.type == DISCOV_TYPE_LE)
2116 		goto discov_stopped;
2117 
2118 	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2119 		return;
2120 
2121 	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2122 		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2123 		    hdev->discovery.state != DISCOVERY_RESOLVING)
2124 			goto discov_stopped;
2125 
2126 		return;
2127 	}
2128 
2129 	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2130 		     HCI_CMD_TIMEOUT, &status);
2131 	if (status) {
2132 		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2133 		goto discov_stopped;
2134 	}
2135 
2136 	return;
2137 
2138 discov_stopped:
2139 	hci_dev_lock(hdev);
2140 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2141 	hci_dev_unlock(hdev);
2142 }
2143 
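/* Disable and immediately re-enable LE scanning in one request so that
 * controllers with a strict duplicate filter start reporting already
 * seen devices again.
 */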
le_scan_restart(struct hci_request * req,unsigned long opt)2144 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2145 {
2146 	struct hci_dev *hdev = req->hdev;
2147 
2148 	/* If the controller is not scanning, we are done. */
2149 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2150 		return 0;
2151 
2152 	if (hdev->scanning_paused) {
2153 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2154 		return 0;
2155 	}
2156 
2157 	hci_req_add_le_scan_disable(req, false);
2158 
2159 	if (use_ext_scan(hdev)) {
2160 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2161 
2162 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2163 		ext_enable_cp.enable = LE_SCAN_ENABLE;
2164 		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2165 
2166 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2167 			    sizeof(ext_enable_cp), &ext_enable_cp);
2168 	} else {
2169 		struct hci_cp_le_set_scan_enable cp;
2170 
2171 		memset(&cp, 0, sizeof(cp));
2172 		cp.enable = LE_SCAN_ENABLE;
2173 		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2174 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2175 	}
2176 
2177 	return 0;
2178 }
2179 
le_scan_restart_work(struct work_struct * work)2180 static void le_scan_restart_work(struct work_struct *work)
2181 {
2182 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2183 					    le_scan_restart.work);
2184 	unsigned long timeout, duration, scan_start, now;
2185 	u8 status;
2186 
2187 	bt_dev_dbg(hdev, "");
2188 
2189 	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2190 	if (status) {
2191 		bt_dev_err(hdev, "failed to restart LE scan: status %d",
2192 			   status);
2193 		return;
2194 	}
2195 
2196 	hci_dev_lock(hdev);
2197 
2198 	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2199 	    !hdev->discovery.scan_start)
2200 		goto unlock;
2201 
2202 	/* When the scan was started, hdev->le_scan_disable was queued to
2203 	 * run 'duration' after scan_start. During the scan restart this
2204 	 * work has been cancelled, so queue it again with the proper
2205 	 * timeout to make sure the scan does not run indefinitely.
2206 	 */
2207 	duration = hdev->discovery.scan_duration;
2208 	scan_start = hdev->discovery.scan_start;
2209 	now = jiffies;
2210 	if (now - scan_start <= duration) {
2211 		int elapsed;
2212 
2213 		if (now >= scan_start)
2214 			elapsed = now - scan_start;
2215 		else
2216 			elapsed = ULONG_MAX - scan_start + now;
2217 
2218 		timeout = duration - elapsed;
2219 	} else {
2220 		timeout = 0;
2221 	}
2222 
2223 	queue_delayed_work(hdev->req_workqueue,
2224 			   &hdev->le_scan_disable, timeout);
2225 
2226 unlock:
2227 	hci_dev_unlock(hdev);
2228 }
2229 
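/* Start an active LE scan for discovery, temporarily stopping any
 * background scan and using a resolvable or non-resolvable private
 * address (falling back to the public address on error).
 */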
active_scan(struct hci_request * req,unsigned long opt)2230 static int active_scan(struct hci_request *req, unsigned long opt)
2231 {
2232 	uint16_t interval = opt;
2233 	struct hci_dev *hdev = req->hdev;
2234 	u8 own_addr_type;
2235 	/* Accept list is not used for discovery */
2236 	u8 filter_policy = 0x00;
2237 	/* Default is to enable the duplicate filter */
2238 	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2239 	/* Discovery doesn't require controller address resolution */
2240 	bool addr_resolv = false;
2241 	int err;
2242 
2243 	bt_dev_dbg(hdev, "");
2244 
2245 	/* If the controller is scanning, it means the background scanning is
2246 	 * running. Thus, we should temporarily stop it in order to set the
2247 	 * discovery scanning parameters.
2248 	 */
2249 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2250 		hci_req_add_le_scan_disable(req, false);
2251 		cancel_interleave_scan(hdev);
2252 	}
2253 
2254 	/* All active scans will be done with either a resolvable private
2255 	 * address (when privacy feature has been enabled) or non-resolvable
2256 	 * private address.
2257 	 */
2258 	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2259 					&own_addr_type);
2260 	if (err < 0)
2261 		own_addr_type = ADDR_LE_DEV_PUBLIC;
2262 
2263 	hci_dev_lock(hdev);
2264 	if (hci_is_adv_monitoring(hdev)) {
2265 		/* The duplicate filter should be disabled when an advertisement
2266 		 * monitor is activated; otherwise AdvMon can only receive one
2267 		 * advertisement per peer(*) during active scanning, and might
2268 		 * falsely report those peers as lost.
2269 		 *
2270 		 * (*) Note that different controllers have different meanings
2271 		 * of |duplicate|. Some consider packets with the same address
2272 		 * as duplicates, others consider packets with the same address
2273 		 * and the same RSSI as duplicates. In the latter case disabling
2274 		 * the duplicate filter is not strictly needed, but since active
2275 		 * scanning typically runs only for a short period of time, the
2276 		 * power impact should be negligible.
2277 		 */
2278 		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2279 	}
2280 	hci_dev_unlock(hdev);
2281 
2282 	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2283 			   hdev->le_scan_window_discovery, own_addr_type,
2284 			   filter_policy, filter_dup, addr_resolv);
2285 	return 0;
2286 }
2287 
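/* Start LE active scanning and BR/EDR inquiry in a single request for
 * controllers that can run both at the same time.
 */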
interleaved_discov(struct hci_request * req,unsigned long opt)2288 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2289 {
2290 	int err;
2291 
2292 	bt_dev_dbg(req->hdev, "");
2293 
2294 	err = active_scan(req, opt);
2295 	if (err)
2296 		return err;
2297 
2298 	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2299 }
2300 
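/* Kick off discovery according to hdev->discovery.type and schedule the
 * delayed work that will disable LE scanning when the discovery timeout
 * expires.
 */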
start_discovery(struct hci_dev * hdev,u8 * status)2301 static void start_discovery(struct hci_dev *hdev, u8 *status)
2302 {
2303 	unsigned long timeout;
2304 
2305 	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
2306 
2307 	switch (hdev->discovery.type) {
2308 	case DISCOV_TYPE_BREDR:
2309 		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2310 			hci_req_sync(hdev, bredr_inquiry,
2311 				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2312 				     status);
2313 		return;
2314 	case DISCOV_TYPE_INTERLEAVED:
2315 		/* When running simultaneous discovery, the LE scanning time
2316 		 * should occupy the whole discovery time since BR/EDR inquiry
2317 		 * and LE scanning are scheduled by the controller.
2318 		 *
2319 		 * Interleaved discovery, in comparison, does BR/EDR
2320 		 * inquiry and LE scanning sequentially with separate
2321 		 * timeouts.
2322 		 */
2323 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2324 			     &hdev->quirks)) {
2325 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2326 			/* During simultaneous discovery, we double the LE scan
2327 			 * interval. We must leave some time for the controller
2328 			 * to do BR/EDR inquiry.
2329 			 */
2330 			hci_req_sync(hdev, interleaved_discov,
2331 				     hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
2332 				     status);
2333 			break;
2334 		}
2335 
2336 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2337 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2338 			     HCI_CMD_TIMEOUT, status);
2339 		break;
2340 	case DISCOV_TYPE_LE:
2341 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2342 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2343 			     HCI_CMD_TIMEOUT, status);
2344 		break;
2345 	default:
2346 		*status = HCI_ERROR_UNSPECIFIED;
2347 		return;
2348 	}
2349 
2350 	if (*status)
2351 		return;
2352 
2353 	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
2354 
2355 	/* When service discovery is used and the controller has a
2356 	 * strict duplicate filter, it is important to remember the
2357 	 * start and duration of the scan. This is required for
2358 	 * restarting scanning during the discovery phase.
2359 	 */
2360 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2361 		     hdev->discovery.result_filtering) {
2362 		hdev->discovery.scan_start = jiffies;
2363 		hdev->discovery.scan_duration = timeout;
2364 	}
2365 
2366 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2367 			   timeout);
2368 }
2369 
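/* Queue the commands needed to stop an ongoing discovery: inquiry
 * cancel, LE scan disable and, if a name resolution is pending, a remote
 * name request cancel. Returns true if there was anything to stop.
 */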
hci_req_stop_discovery(struct hci_request * req)2370 bool hci_req_stop_discovery(struct hci_request *req)
2371 {
2372 	struct hci_dev *hdev = req->hdev;
2373 	struct discovery_state *d = &hdev->discovery;
2374 	struct hci_cp_remote_name_req_cancel cp;
2375 	struct inquiry_entry *e;
2376 	bool ret = false;
2377 
2378 	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
2379 
2380 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2381 		if (test_bit(HCI_INQUIRY, &hdev->flags))
2382 			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2383 
2384 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2385 			cancel_delayed_work(&hdev->le_scan_disable);
2386 			cancel_delayed_work(&hdev->le_scan_restart);
2387 			hci_req_add_le_scan_disable(req, false);
2388 		}
2389 
2390 		ret = true;
2391 	} else {
2392 		/* Passive scanning */
2393 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2394 			hci_req_add_le_scan_disable(req, false);
2395 			ret = true;
2396 		}
2397 	}
2398 
2399 	/* No further actions needed for LE-only discovery */
2400 	if (d->type == DISCOV_TYPE_LE)
2401 		return ret;
2402 
2403 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2404 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2405 						     NAME_PENDING);
2406 		if (!e)
2407 			return ret;
2408 
2409 		bacpy(&cp.bdaddr, &e->data.bdaddr);
2410 		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2411 			    &cp);
2412 		ret = true;
2413 	}
2414 
2415 	return ret;
2416 }
2417 
config_data_path_complete(struct hci_dev * hdev,u8 status,u16 opcode)2418 static void config_data_path_complete(struct hci_dev *hdev, u8 status,
2419 				      u16 opcode)
2420 {
2421 	bt_dev_dbg(hdev, "status %u", status);
2422 }
2423 
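/* Configure the controller data path for an offloaded codec: fetch the
 * vendor specific configuration and data path id from the driver, then
 * issue Configure Data Path for both the input and output direction.
 */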
hci_req_configure_datapath(struct hci_dev * hdev,struct bt_codec * codec)2424 int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
2425 {
2426 	struct hci_request req;
2427 	int err;
2428 	__u8 vnd_len, *vnd_data = NULL;
2429 	struct hci_op_configure_data_path *cmd = NULL;
2430 
2431 	hci_req_init(&req, hdev);
2432 
2433 	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
2434 					  &vnd_data);
2435 	if (err < 0)
2436 		goto error;
2437 
2438 	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
2439 	if (!cmd) {
2440 		err = -ENOMEM;
2441 		goto error;
2442 	}
2443 
2444 	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
2445 	if (err < 0)
2446 		goto error;
2447 
2448 	cmd->vnd_len = vnd_len;
2449 	memcpy(cmd->vnd_data, vnd_data, vnd_len);
2450 
2451 	cmd->direction = 0x00;
2452 	hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2453 
2454 	cmd->direction = 0x01;
2455 	hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2456 
2457 	err = hci_req_run(&req, config_data_path_complete);
2458 error:
2459 
2460 	kfree(cmd);
2461 	kfree(vnd_data);
2462 	return err;
2463 }
2464 
stop_discovery(struct hci_request * req,unsigned long opt)2465 static int stop_discovery(struct hci_request *req, unsigned long opt)
2466 {
2467 	hci_dev_lock(req->hdev);
2468 	hci_req_stop_discovery(req);
2469 	hci_dev_unlock(req->hdev);
2470 
2471 	return 0;
2472 }
2473 
discov_update(struct work_struct * work)2474 static void discov_update(struct work_struct *work)
2475 {
2476 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2477 					    discov_update);
2478 	u8 status = 0;
2479 
2480 	switch (hdev->discovery.state) {
2481 	case DISCOVERY_STARTING:
2482 		start_discovery(hdev, &status);
2483 		mgmt_start_discovery_complete(hdev, status);
2484 		if (status)
2485 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2486 		else
2487 			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2488 		break;
2489 	case DISCOVERY_STOPPING:
2490 		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2491 		mgmt_stop_discovery_complete(hdev, status);
2492 		if (!status)
2493 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2494 		break;
2495 	case DISCOVERY_STOPPED:
2496 	default:
2497 		return;
2498 	}
2499 }
2500 
discov_off(struct work_struct * work)2501 static void discov_off(struct work_struct *work)
2502 {
2503 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2504 					    discov_off.work);
2505 
2506 	bt_dev_dbg(hdev, "");
2507 
2508 	hci_dev_lock(hdev);
2509 
2510 	/* When the discoverable timeout triggers, just make sure
2511 	 * the limited discoverable flag is cleared. Even in the case
2512 	 * of a timeout triggered from general discoverable, it is
2513 	 * safe to unconditionally clear the flag.
2514 	 */
2515 	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2516 	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2517 	hdev->discov_timeout = 0;
2518 
2519 	hci_dev_unlock(hdev);
2520 
2521 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2522 	mgmt_new_settings(hdev);
2523 }
2524 
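/* Bring the controller in line with the stored host settings when
 * powering on: SSP/SC support, LE host support, advertising data and
 * instances, link security, fast connectable, scan mode, class, name and
 * EIR.
 */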
powered_update_hci(struct hci_request * req,unsigned long opt)2525 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2526 {
2527 	struct hci_dev *hdev = req->hdev;
2528 	u8 link_sec;
2529 
2530 	hci_dev_lock(hdev);
2531 
2532 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2533 	    !lmp_host_ssp_capable(hdev)) {
2534 		u8 mode = 0x01;
2535 
2536 		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2537 
2538 		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2539 			u8 support = 0x01;
2540 
2541 			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2542 				    sizeof(support), &support);
2543 		}
2544 	}
2545 
2546 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2547 	    lmp_bredr_capable(hdev)) {
2548 		struct hci_cp_write_le_host_supported cp;
2549 
2550 		cp.le = 0x01;
2551 		cp.simul = 0x00;
2552 
2553 		/* Check first if we already have the right
2554 		 * host state (host features set)
2555 		 */
2556 		if (cp.le != lmp_host_le_capable(hdev) ||
2557 		    cp.simul != lmp_host_le_br_capable(hdev))
2558 			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2559 				    sizeof(cp), &cp);
2560 	}
2561 
2562 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2563 		/* Make sure the controller has a good default for
2564 		 * advertising data. This also applies to the case
2565 		 * where BR/EDR was toggled during the AUTO_OFF phase.
2566 		 */
2567 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2568 		    list_empty(&hdev->adv_instances)) {
2569 			int err;
2570 
2571 			if (ext_adv_capable(hdev)) {
2572 				err = __hci_req_setup_ext_adv_instance(req,
2573 								       0x00);
2574 				if (!err)
2575 					__hci_req_update_scan_rsp_data(req,
2576 								       0x00);
2577 			} else {
2578 				err = 0;
2579 				__hci_req_update_adv_data(req, 0x00);
2580 				__hci_req_update_scan_rsp_data(req, 0x00);
2581 			}
2582 
2583 			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2584 				if (!ext_adv_capable(hdev))
2585 					__hci_req_enable_advertising(req);
2586 				else if (!err)
2587 					__hci_req_enable_ext_advertising(req,
2588 									 0x00);
2589 			}
2590 		} else if (!list_empty(&hdev->adv_instances)) {
2591 			struct adv_info *adv_instance;
2592 
2593 			adv_instance = list_first_entry(&hdev->adv_instances,
2594 							struct adv_info, list);
2595 			__hci_req_schedule_adv_instance(req,
2596 							adv_instance->instance,
2597 							true);
2598 		}
2599 	}
2600 
2601 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2602 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2603 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2604 			    sizeof(link_sec), &link_sec);
2605 
2606 	if (lmp_bredr_capable(hdev)) {
2607 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2608 			__hci_req_write_fast_connectable(req, true);
2609 		else
2610 			__hci_req_write_fast_connectable(req, false);
2611 		__hci_req_update_scan(req);
2612 		__hci_req_update_class(req);
2613 		__hci_req_update_name(req);
2614 		__hci_req_update_eir(req);
2615 	}
2616 
2617 	hci_dev_unlock(hdev);
2618 	return 0;
2619 }
2620 
__hci_req_hci_power_on(struct hci_dev * hdev)2621 int __hci_req_hci_power_on(struct hci_dev *hdev)
2622 {
2623 	/* Register the available SMP channels (BR/EDR and LE) only when
2624 	 * successfully powering on the controller. This late
2625 	 * registration is required so that LE SMP can clearly decide if
2626 	 * the public address or static address is used.
2627 	 */
2628 	smp_register(hdev);
2629 
2630 	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2631 			      NULL);
2632 }
2633 
hci_request_setup(struct hci_dev * hdev)2634 void hci_request_setup(struct hci_dev *hdev)
2635 {
2636 	INIT_WORK(&hdev->discov_update, discov_update);
2637 	INIT_WORK(&hdev->scan_update, scan_update_work);
2638 	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2639 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2640 	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2641 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2642 	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
2643 }
2644 
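/* Cancel the pending synchronous command and all work items owned by the
 * request infrastructure.
 */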
hci_request_cancel_all(struct hci_dev * hdev)2645 void hci_request_cancel_all(struct hci_dev *hdev)
2646 {
2647 	__hci_cmd_sync_cancel(hdev, ENODEV);
2648 
2649 	cancel_work_sync(&hdev->discov_update);
2650 	cancel_work_sync(&hdev->scan_update);
2651 	cancel_delayed_work_sync(&hdev->discov_off);
2652 	cancel_delayed_work_sync(&hdev->le_scan_disable);
2653 	cancel_delayed_work_sync(&hdev->le_scan_restart);
2654 
2655 	if (hdev->adv_instance_timeout) {
2656 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
2657 		hdev->adv_instance_timeout = 0;
2658 	}
2659 
2660 	cancel_interleave_scan(hdev);
2661 }
2662