/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
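
/* Illustrative sketch (editorial addition, not called by the stack): how a
 * caller is expected to use the helpers above. A request is initialized,
 * one or more commands are queued and the whole batch is handed off to the
 * command work queue. The command mirrors hci_req_add_le_scan_disable()
 * below; the function name is hypothetical.
 */
static int __maybe_unused example_req_disable_le_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	/* A NULL completion callback is valid; see req_run() above */
	return hci_req_run(&req, NULL);
}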

void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
			   struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	/* Check the state after obtaining the lock to protect HCI_UP
	 * against any races from hci_dev_do_close() when the controller
	 * gets removed.
	 */
	if (test_bit(HCI_UP, &hdev->flags))
		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	else
		ret = -ENETDOWN;
	hci_req_sync_unlock(hdev);

	return ret;
}
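
/* Illustrative sketch (editorial addition, not called by the stack): a
 * minimal request builder of the shape hci_req_sync() expects, plus a
 * hypothetical caller. The builder queues a single Read Local Name
 * command; a conditional builder that queues nothing would make
 * __hci_req_sync() return 0 via the -ENODATA path documented above.
 */
static int __maybe_unused example_read_local_name(struct hci_request *req,
						  unsigned long opt)
{
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
	return 0;
}

static int __maybe_unused example_sync_read_local_name(struct hci_dev *hdev)
{
	u8 status;

	/* Blocks until completion, timeout or signal; status receives the
	 * HCI status code from the controller.
	 */
	return hci_req_sync(hdev, example_read_local_name, 0, HCI_CMD_TIMEOUT,
			    &status);
}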

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
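
/* For reference, the command packet built above has the layout below (per
 * the HCI command packet format; the HCI_COMMAND_PKT type indicator stored
 * in the skb control block is conveyed out of band or prepended later by
 * the transport driver):
 *
 *	+---------------+------+----------------------+
 *	| opcode (LE16) | plen | plen parameter bytes |
 *	+---------------+------+----------------------+
 */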

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	hci_skb_event(skb) = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	bt_dev_dbg(req->hdev, "HCI_REQ-0x%4.4x", opcode);
	hci_req_add_ev(req, opcode, plen, param, 0);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if an interleave scan was started by this function,
 * otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
				 u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Add a connection to the accept list if needed. Returns -1 on error. */
static int add_to_accept_list(struct hci_request *req,
			      struct hci_conn_params *params, u8 *num_entries,
			      bool allow_rpa)
{
	struct hci_cp_le_add_to_accept_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_accept_list_size)
		return -1;

	/* Accept list cannot be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in accept list */
	if (hdev->suspended &&
	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

static u8 update_accept_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow usage of the accept list even with RPAs in suspend. In the
	 * worst case, we won't be able to wake from devices that use the
	 * privacy 1.2 features. Additionally, once we support privacy 1.2 and
	 * IRK offloading, we can update this to also check for those
	 * conditions.
	 */
	bool allow_rpa = hdev->suspended;

	if (use_ll_privacy(hdev))
		allow_rpa = true;

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_accept_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the accept list.
		 */
		if (!pend_conn && !pend_report) {
			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* Accept list cannot be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return a filter policy value to not use the
	 * accept list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There is at least one ADV monitor registered and it's not
	 *   offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use accept list */
	return 0x01;
}
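
/* For reference: update_accept_list() returns the scan filter policy to
 * program, 0x00 (process all advertisements) or 0x01 (process only
 * advertisements from devices in the accept list). When LE privacy and
 * Extended Scanner Filter Policies apply, hci_req_add_le_passive_scan()
 * below ORs in 0x02, yielding the extended variants 0x02/0x03 that also
 * accept directed advertising addressed to an RPA the controller cannot
 * resolve.
 */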

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool filter_dup, bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) && addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use extended scanning if both the extended scan parameters and
	 * extended scan enable commands are supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = filter_dup;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = filter_dup;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa);
static int hci_update_random_address(struct hci_request *req,
				     bool require_privacy, bool use_rpa,
				     u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or something other than the
	 * current RPA is in use, then generate a new one.
	 */
	if (use_rpa) {
		/* If the controller supports LL Privacy, use own address
		 * type 0x03
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (rpa_valid(hdev))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
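
/* For reference, the own_addr_type values selected above (these are the
 * ADDR_LE_DEV_* constants used throughout this file):
 *	ADDR_LE_DEV_PUBLIC		public device address
 *	ADDR_LE_DEV_RANDOM		random address (RPA, NRPA or static)
 *	ADDR_LE_DEV_RANDOM_RESOLVED	controller generates an RPA from the
 *					local IRK in the resolving list
 */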

/* Call hci_req_add_le_scan_disable() first to disable controller-based
 * address resolution so that the resolving list can be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ is sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = update_accept_list(req);

	/* When the controller is using resolvable random addresses and
	 * thus has LE privacy enabled, controllers that support the
	 * Extended Scanner Filter Policies can additionally handle
	 * directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no accept list)
	 * and 0x01 (accept list enabled) use the new filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;

		/* Disable the duplicates filter when scanning for an
		 * advertisement monitor for the following reasons.
		 *
		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
		 * controllers ignore RSSI_Sampling_Period when the duplicates
		 * filter is enabled.
		 *
		 * For SW pattern filtering, when we're not doing interleaved
		 * scanning, it is necessary to disable the duplicates filter,
		 * otherwise hosts can only receive one advertisement and it's
		 * impossible to know if a peer is still in range.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
		   filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, filter_dup,
			   addr_resolv);
}

static int hci_req_add_le_interleaved_scan(struct hci_request *req,
					   unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	int ret = 0;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);
	hci_req_add_le_passive_scan(req);

	switch (hdev->interleave_scan_state) {
	case INTERLEAVE_SCAN_ALLOWLIST:
		bt_dev_dbg(hdev, "next state: no filter");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
		break;
	case INTERLEAVE_SCAN_NO_FILTER:
		bt_dev_dbg(hdev, "next state: allowlist");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
		break;
	case INTERLEAVE_SCAN_NONE:
		BT_ERR("unexpected interleave scan state");
		ret = -1;
	}

	hci_dev_unlock(hdev);

	return ret;
}

static void interleave_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    interleave_scan.work);
	u8 status;
	unsigned long timeout;

	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
	} else {
		bt_dev_err(hdev, "unexpected interleave scan state");
		return;
	}

	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
		     HCI_CMD_TIMEOUT, &status);

	/* Don't continue interleaving if it was canceled */
	if (is_interleave_scanning(hdev))
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->interleave_scan, timeout);
}
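
/* For reference, the resulting interleave cycle starts in the no-filter
 * pass (see start_interleave_scan() above) and alternates between the two
 * scan passes until cancel_interleave_scan() resets the state; the pass
 * durations come from advmon_no_filter_duration and
 * advmon_allowlist_duration:
 *
 *	| no filter | allowlist | no filter | allowlist | ...
 */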

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	__hci_cmd_sync_cancel(hdev, ENODEV);

	cancel_interleave_scan(hdev);
}