/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"

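/* Initialize a request tracker: bind it to the given controller, start
 * with an empty HCI command queue and no recorded build error.
 */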
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

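/* Drop any commands that were queued on a request but never run. */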
void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

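/* Check whether a synchronous request is still waiting for its
 * completion event.
 */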
bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

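/* Hand a built request over to the controller: attach the completion
 * callback (plain or skb-based) to the last queued command, splice the
 * request's command queue onto the device command queue and kick the
 * command work. Only the final command of a request carries a callback,
 * which is how completion of the whole request is detected.
 */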
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

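/* Wrappers around req_run(): hci_req_run() reports only the command
 * status to the callback, hci_req_run_skb() also hands over the
 * command-complete skb.
 */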
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

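/* Completion callback used by the synchronous request machinery: record
 * the result, keep a reference to the response skb (if any) and wake up
 * the waiter in __hci_req_sync().
 */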
void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
			   struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}

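/* Locked variant of __hci_req_sync(): takes the request-sync lock and
 * refuses to run when the controller is not up, so it is safe against a
 * concurrent hci_dev_do_close().
 */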
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	/* Check the state after obtaining the lock to protect the HCI_UP
	 * flag against any races from hci_dev_do_close() when the
	 * controller gets removed.
	 */
	if (test_bit(HCI_UP, &hdev->flags))
		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	else
		ret = -ENETDOWN;
	hci_req_sync_unlock(hdev);

	return ret;
}

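/* Allocate an skb containing a complete HCI command packet: a command
 * header (opcode plus parameter length) followed by plen bytes of
 * parameters.
 */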
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request, optionally naming the
 * event that is expected to signal its completion.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	hci_skb_event(skb) = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	bt_dev_dbg(req->hdev, "HCI_REQ-0x%4.4x", opcode);
	hci_req_add_ev(req, opcode, plen, param, 0);
}

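/* Interleaved scanning alternates between two passive scan
 * configurations, one restricted to the accept list ("allowlist") and
 * one without any filter, so that advertisement monitors can still see
 * peers that are not on the accept list while pending connections keep
 * working. The state machine is driven by the interleave_scan delayed
 * work (see interleave_scan_work() below).
 */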
static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if this function started the interleave scan (that is,
 * interleaving was not running on entry and has been started here);
 * otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

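/* Queue the commands that stop LE scanning: disable (extended) scanning
 * and, unless an RPA-based LE connection is about to be made, also turn
 * off controller-based address resolution.
 */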
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

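/* Remove a device from the controller accept list and, when LL privacy
 * is in use and an IRK is known for it, from the resolving list as well.
 */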
static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
				 u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
			      struct hci_conn_params *params, u8 *num_entries,
			      bool allow_rpa)
{
	struct hci_cp_le_add_to_accept_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_accept_list_size)
		return -1;

	/* Accept list cannot be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in accept list */
	if (hdev->suspended &&
	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

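/* Synchronize the controller accept list with the host's pending
 * connections and reports, and return the scan filter policy to use:
 * 0x00 to accept all advertising (accept list not usable) or 0x01 to
 * accept only advertisers on the accept list.
 */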
static u8 update_accept_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow usage of accept list even with RPAs in suspend. In the worst
	 * case, we won't be able to wake from devices that use the Privacy 1.2
	 * features. Additionally, once we support Privacy 1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	if (use_ll_privacy(hdev))
		allow_rpa = true;

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_accept_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the accept list.
		 */
		if (!pend_conn && !pend_report) {
			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* Accept list cannot be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * accept list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless all of the following are true:
	 * - We are not currently suspending
	 * - At least one ADV monitor is registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use accept list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

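/* Queue the commands that start LE scanning with the given parameters.
 * Uses the extended scan commands when the controller supports them,
 * programming the 1M and/or Coded PHYs as available; otherwise falls
 * back to the legacy scan parameter/enable commands.
 */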
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool filter_dup, bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) && addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use extended scanning if the controller supports both the set
	 * extended scan parameters and extended scan enable commands.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = filter_dup;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = filter_dup;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa);
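
/* Pick the own address type (and program a random address if necessary)
 * for an upcoming scan or advertising operation, according to the
 * privacy settings: an RPA when privacy is enabled, a fresh NRPA when
 * privacy is required but no RPA may be used, the configured static
 * address when one is forced or no public address exists, or the public
 * address otherwise.
 */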
static int hci_update_random_address(struct hci_request *req,
				     bool require_privacy, bool use_rpa,
				     u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or something other than the
	 * current RPA is in use, generate a new one.
	 */
	if (use_rpa) {
		/* If the controller supports LL Privacy, use own address
		 * type 0x03 (RPA, resolved by the controller).
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (rpa_valid(hdev))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

/* Ensure to call hci_req_add_le_scan_disable() first to disable the
 * controller based address resolution to be able to reconfigure
 * resolving list.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = update_accept_list(req);

	/* When the controller is using resolvable random addresses and
	 * thus has LE privacy enabled, controllers that support the
	 * Extended Scanner Filter Policies feature can also handle
	 * directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no accept list)
	 * and 0x01 (accept list enabled) use the new filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;

		/* Disable duplicates filter when scanning for advertisement
		 * monitor for the following reasons.
		 *
		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
		 * controllers ignore RSSI_Sampling_Period when the duplicates
		 * filter is enabled.
		 *
		 * For SW pattern filtering, when we're not doing interleaved
		 * scanning, it is necessary to disable duplicates filter,
		 * otherwise hosts can only receive one advertisement and it's
		 * impossible to know if a peer is still in range.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
		   filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, filter_dup,
			   addr_resolv);
}

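/* Request callback used by the interleave scan work: restart passive
 * scanning and advance the interleave state machine to the other
 * filter mode.
 */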
static int hci_req_add_le_interleaved_scan(struct hci_request *req,
					   unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	int ret = 0;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);
	hci_req_add_le_passive_scan(req);

	switch (hdev->interleave_scan_state) {
	case INTERLEAVE_SCAN_ALLOWLIST:
		bt_dev_dbg(hdev, "next state: no filter");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
		break;
	case INTERLEAVE_SCAN_NO_FILTER:
		bt_dev_dbg(hdev, "next state: allowlist");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
		break;
	case INTERLEAVE_SCAN_NONE:
		BT_ERR("unexpected error");
		ret = -1;
		break;
	}

	hci_dev_unlock(hdev);

	return ret;
}

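/* Delayed work driving the interleave scan: run one scan cycle in the
 * current filter mode, then re-queue itself after the duration
 * configured for that mode, as long as interleaving has not been
 * cancelled.
 */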
static void interleave_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    interleave_scan.work);
	u8 status;
	unsigned long timeout;

	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
	} else {
		bt_dev_err(hdev, "unexpected error");
		return;
	}

	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
		     HCI_CMD_TIMEOUT, &status);

	/* Don't continue interleaving if it was canceled */
	if (is_interleave_scanning(hdev))
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->interleave_scan, timeout);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

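/* One-time setup of the request-related work items for a controller. */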
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}

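/* Cancel any pending synchronous command (with ENODEV, i.e. the device
 * is going away) and stop interleaved scanning.
 */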
void hci_request_cancel_all(struct hci_dev *hdev)
{
	__hci_cmd_sync_cancel(hdev, ENODEV);

	cancel_interleave_scan(hdev);
}