1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43
/* Management interface version/revision reported by MGMT_OP_READ_VERSION */
#define MGMT_VERSION	1
#define MGMT_REVISION	22
46
/* Commands available to trusted control-channel sockets. The array order
 * is the order in which opcodes are reported in the Read Commands reply,
 * so entries must only ever be appended.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
137
/* Events that may be delivered to trusted control-channel sockets;
 * reported verbatim, in this order, by the Read Commands reply.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184
/* Read-only command subset permitted for untrusted (non-privileged)
 * control-channel sockets.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197
/* Event subset delivered to untrusted control-channel sockets. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212
/* Two-second timeout, in jiffies */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* 16 bytes of zeros (an all-zero 128-bit key value) */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217
218 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code; mgmt_status() bounds-checks
 * the index before lookup, so out-of-range codes fall back to
 * MGMT_STATUS_FAILED there. Entry order therefore must match the HCI
 * error code numbering exactly.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285
mgmt_errno_status(int err)286 static u8 mgmt_errno_status(int err)
287 {
288 switch (err) {
289 case 0:
290 return MGMT_STATUS_SUCCESS;
291 case -EPERM:
292 return MGMT_STATUS_REJECTED;
293 case -EINVAL:
294 return MGMT_STATUS_INVALID_PARAMS;
295 case -EOPNOTSUPP:
296 return MGMT_STATUS_NOT_SUPPORTED;
297 case -EBUSY:
298 return MGMT_STATUS_BUSY;
299 case -ETIMEDOUT:
300 return MGMT_STATUS_AUTH_FAILED;
301 case -ENOMEM:
302 return MGMT_STATUS_NO_RESOURCES;
303 case -EISCONN:
304 return MGMT_STATUS_ALREADY_CONNECTED;
305 case -ENOTCONN:
306 return MGMT_STATUS_DISCONNECTED;
307 }
308
309 return MGMT_STATUS_FAILED;
310 }
311
mgmt_status(int err)312 static u8 mgmt_status(int err)
313 {
314 if (err < 0)
315 return mgmt_errno_status(err);
316
317 if (err < ARRAY_SIZE(mgmt_status_table))
318 return mgmt_status_table[err];
319
320 return MGMT_STATUS_FAILED;
321 }
322
/* Broadcast @event for @hdev on the control channel to all sockets that
 * match @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329
/* Like mgmt_index_event() but skips delivery to @skip_sk (typically the
 * socket that triggered the event).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336
/* Broadcast @event on the control channel using the HCI_SOCK_TRUSTED
 * flag, skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343
/* skb variant of mgmt_event(): send a pre-built event skb on the
 * control channel with the HCI_SOCK_TRUSTED flag, skipping @skip_sk.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349
/* Translate an MGMT LE address type into the HCI LE address type.
 * Everything other than public is treated as random.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return (mgmt_addr_type == BDADDR_LE_PUBLIC) ? ADDR_LE_DEV_PUBLIC :
						      ADDR_LE_DEV_RANDOM;
}
357
/* Fill a mgmt_rp_read_version structure (passed as void * for callers
 * that only have a raw buffer) with the compiled-in interface version.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
365
/* MGMT_OP_READ_VERSION handler: report the interface version. */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rsp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Shared helper keeps this reply in sync with other users of the
	 * version information.
	 */
	mgmt_fill_version_info(&rsp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rsp, sizeof(rsp));
}
378
/* MGMT_OP_READ_COMMANDS handler: report the supported command and event
 * opcodes. Trusted sockets see the full set, untrusted sockets only the
 * read-only subset.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *commands, *events;
	u16 num_commands, num_events;
	size_t rp_size;
	__le16 *opcode;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pick the opcode tables up front so a single fill loop serves
	 * both the trusted and the untrusted case.
	 */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		commands = mgmt_commands;
		events = mgmt_events;
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		commands = mgmt_untrusted_commands;
		events = mgmt_untrusted_events;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* The opcode entries may sit at unaligned offsets within the
	 * reply, hence put_unaligned_le16().
	 */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(events[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
430
/* MGMT_OP_READ_INDEX_LIST handler: report the ids of all configured
 * primary controllers.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count matching devices to size the reply. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because the allocation happens under the read lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the ids. Extra filters here (SETUP,
	 * CONFIG, USER_CHANNEL, raw-only) mean the final count can be
	 * smaller than the allocation sized above, never larger.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the actual entry count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
490
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * reports only primary controllers still flagged HCI_UNCONFIGURED.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count matching devices to size the reply. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because the allocation happens under the read lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in ids; extra filters may shrink the count. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the actual entry count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
550
/* MGMT_OP_READ_EXT_INDEX_LIST handler: report all controllers (primary
 * and AMP) with a per-entry type (0x00 configured primary, 0x01
 * unconfigured primary, 0x02 AMP) and bus information.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count matching devices to size the reply. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC because the allocation happens under the read lock. */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill entries; extra filters may shrink the count. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
624
is_configured(struct hci_dev * hdev)625 static bool is_configured(struct hci_dev *hdev)
626 {
627 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
628 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
629 return false;
630
631 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
632 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
633 !bacmp(&hdev->public_addr, BDADDR_ANY))
634 return false;
635
636 return true;
637 }
638
/* Build the bitmask of configuration options still outstanding for
 * @hdev. Mirrors the checks in is_configured(): a set bit here means
 * the corresponding option must be supplied before the controller
 * becomes configured.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	/* External configuration required but not yet performed */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Public address required but still all-zero */
	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
654
/* Emit MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-options
 * mask, to sockets with HCI_MGMT_OPTION_EVENTS, skipping @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
662
/* Complete @opcode with the current missing-options mask as reply. */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
670
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer id plus which
 * configuration options the controller supports and which are still
 * missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* memset (not designated init) so any struct padding going to
	 * userspace is zeroed too.
	 */
	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting a public address is only supported when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
698
/* Build the bitmask of PHYs the controller is capable of, derived from
 * its LMP/LE feature bits. Higher-rate EDR entries are nested under the
 * corresponding capability checks since e.g. 3M requires 2M support.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for any BR/EDR controller */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE controller */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
750
/* Build the bitmask of currently selected PHYs. For BR/EDR the EDR
 * packet-type bits in hdev->pkt_type are "do not use" flags, hence the
 * inverted tests; for LE the default TX/RX PHY preferences are checked
 * directly.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is always selected on BR/EDR */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits set in pkt_type mean "packet type
			 * shall NOT be used", so a clear bit selects it.
			 */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
813
get_configurable_phys(struct hci_dev * hdev)814 static u32 get_configurable_phys(struct hci_dev *hdev)
815 {
816 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
817 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
818 }
819
/* Build the bitmask of settings this controller could support, based on
 * its capabilities (not on what is currently enabled — see
 * get_current_settings() for that).
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always-available settings, independent of capabilities */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable needs at least Bluetooth 1.2 */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed additionally requires CONFIG_BT_HS */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration requires either external config support or a
	 * driver-provided set_bdaddr callback.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
872
/* Build the bitmask of settings currently in effect for @hdev, derived
 * from its runtime flags. Counterpart of get_supported_settings().
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}
955
/* Look up a pending mgmt command for @opcode on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
960
mgmt_get_adv_discov_flags(struct hci_dev * hdev)961 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
962 {
963 struct mgmt_pending_cmd *cmd;
964
965 /* If there's a pending mgmt command the flags will not yet have
966 * their final values, so check for this first.
967 */
968 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
969 if (cmd) {
970 struct mgmt_mode *cp = cmd->param;
971 if (cp->val == 0x01)
972 return LE_AD_GENERAL;
973 else if (cp->val == 0x02)
974 return LE_AD_LIMITED;
975 } else {
976 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
977 return LE_AD_LIMITED;
978 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
979 return LE_AD_GENERAL;
980 }
981
982 return 0;
983 }
984
mgmt_get_connectable(struct hci_dev * hdev)985 bool mgmt_get_connectable(struct hci_dev *hdev)
986 {
987 struct mgmt_pending_cmd *cmd;
988
989 /* If there's a pending mgmt command the flag will not yet have
990 * it's final value, so check for this first.
991 */
992 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
993 if (cmd) {
994 struct mgmt_mode *cp = cmd->param;
995
996 return cp->val;
997 }
998
999 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1000 }
1001
/* hci_cmd_sync callback: push the pending EIR and class-of-device
 * updates out to the controller. Always reports success.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
1009
service_cache_off(struct work_struct * work)1010 static void service_cache_off(struct work_struct *work)
1011 {
1012 struct hci_dev *hdev = container_of(work, struct hci_dev,
1013 service_cache.work);
1014
1015 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1016 return;
1017
1018 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1019 }
1020
rpa_expired_sync(struct hci_dev * hdev,void * data)1021 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1022 {
1023 /* The generation of a new RPA and programming it into the
1024 * controller happens in the hci_req_enable_advertising()
1025 * function.
1026 */
1027 if (ext_adv_capable(hdev))
1028 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1029 else
1030 return hci_enable_advertising_sync(hdev);
1031 }
1032
rpa_expired(struct work_struct * work)1033 static void rpa_expired(struct work_struct *work)
1034 {
1035 struct hci_dev *hdev = container_of(work, struct hci_dev,
1036 rpa_expired.work);
1037
1038 bt_dev_dbg(hdev, "");
1039
1040 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1041
1042 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1043 return;
1044
1045 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1046 }
1047
/* Delayed work: the discoverable timeout fired. Clear the
 * discoverable flags, sync the controller and notify mgmt listeners.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_update_discoverable(hdev);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1072
1073 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1074
/* Finish a tracked mesh transmission: optionally notify user space
 * with a Mesh Packet Complete event, then drop the tracking entry.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	/* Copy the handle out first - mgmt_mesh_remove() presumably
	 * frees mesh_tx (TODO confirm in mgmt_util).
	 */
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1086
/* hci_cmd_sync callback: stop the current mesh transmission and
 * complete the packet at the head of the queue, if any. Completing it
 * triggers the next transmission via mesh_next().
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	/* Clear the sending state before turning advertising off. */
	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1100
1101 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1102 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion hook for mesh_send_done_sync(): start transmitting the
 * next queued mesh packet, if there is one.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	/* The incoming err is deliberately ignored; the variable is
	 * reused for the result of queuing the next transmission.
	 */
	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1118
mesh_send_done(struct work_struct * work)1119 static void mesh_send_done(struct work_struct *work)
1120 {
1121 struct hci_dev *hdev = container_of(work, struct hci_dev,
1122 mesh_send_done.work);
1123
1124 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1125 return;
1126
1127 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1128 }
1129
/* One-time switch of @hdev into mgmt-controlled mode, triggered by
 * the first mgmt command touching it. Idempotent via the HCI_MGMT
 * flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1151
/* MGMT_OP_READ_INFO handler: snapshot the controller's address,
 * version, class, names and settings into a mgmt_rp_read_info reply.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Hold the lock so all fields are read consistently. */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1181
append_eir_data_to_buf(struct hci_dev * hdev,u8 * eir)1182 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1183 {
1184 u16 eir_len = 0;
1185 size_t name_len;
1186
1187 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1188 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1189 hdev->dev_class, 3);
1190
1191 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1192 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1193 hdev->appearance);
1194
1195 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1196 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1197 hdev->dev_name, name_len);
1198
1199 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1200 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1201 hdev->short_name, name_len);
1202
1203 return eir_len;
1204 }
1205
/* MGMT_OP_READ_EXT_INFO handler: like Read Info but with class,
 * appearance and names packed as EIR data after the fixed header.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* NOTE(review): 512 bytes are assumed to cover the fixed
	 * header plus the worst-case output of
	 * append_eir_data_to_buf() - confirm against the EIR field
	 * limits.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1245
/* Broadcast MGMT_EV_EXT_INFO_CHANGED (carrying the current EIR data)
 * to all sockets that opted into extended info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1261
/* Complete @opcode towards @sk with the current settings bitmask as
 * the response payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings;

	settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1269
/* Emit Advertising Added for @instance to all mgmt sockets but @sk. */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = { .instance = instance };

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1278
/* Emit Advertising Removed for @instance to all mgmt sockets but @sk. */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = { .instance = instance };

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1288
cancel_adv_timeout(struct hci_dev * hdev)1289 static void cancel_adv_timeout(struct hci_dev *hdev)
1290 {
1291 if (hdev->adv_instance_timeout) {
1292 hdev->adv_instance_timeout = 0;
1293 cancel_delayed_work(&hdev->adv_instance_expire);
1294 }
1295 }
1296
1297 /* This function requires the caller holds hdev->lock */
restart_le_actions(struct hci_dev * hdev)1298 static void restart_le_actions(struct hci_dev *hdev)
1299 {
1300 struct hci_conn_params *p;
1301
1302 list_for_each_entry(p, &hdev->le_conn_params, list) {
1303 /* Needed for AUTO_OFF case where might not "really"
1304 * have been powered off.
1305 */
1306 hci_pend_le_list_del_init(p);
1307
1308 switch (p->auto_connect) {
1309 case HCI_AUTO_CONN_DIRECT:
1310 case HCI_AUTO_CONN_ALWAYS:
1311 hci_pend_le_list_add(p, &hdev->pend_le_conns);
1312 break;
1313 case HCI_AUTO_CONN_REPORT:
1314 hci_pend_le_list_add(p, &hdev->pend_le_reports);
1315 break;
1316 default:
1317 break;
1318 }
1319 }
1320 }
1321
new_settings(struct hci_dev * hdev,struct sock * skip)1322 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1323 {
1324 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1325
1326 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1327 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1328 }
1329
/* Completion handler for Set Powered: respond to the pending command
 * and, on power on, restore LE connection actions and emit New
 * Settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* Power on: re-arm LE auto-connect actions and
			 * passive scanning under the device lock.
			 */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1365
set_powered_sync(struct hci_dev * hdev,void * data)1366 static int set_powered_sync(struct hci_dev *hdev, void *data)
1367 {
1368 struct mgmt_pending_cmd *cmd = data;
1369 struct mgmt_mode *cp = cmd->param;
1370
1371 BT_DBG("%s", hdev->name);
1372
1373 return hci_set_powered_sync(hdev, cp->val);
1374 }
1375
/* MGMT_OP_SET_POWERED handler: power the controller up or down.
 * Responds immediately when the requested state already matches;
 * otherwise queues the change and defers the response to
 * mgmt_set_powered_complete().
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x00 (off) and 0x01 (on) are valid. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered may be in flight at a time. */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state - just echo the settings. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		__hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1426
/* Broadcast the current settings to all mgmt sockets (no skip). */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1431
/* Shared context for the mgmt_pending_foreach() callbacks below. */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket; a reference
				 * is taken in settings_rsp()
				 */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1437
/* mgmt_pending_foreach() helper: answer @cmd with the current
 * settings and free it. The first command's socket is stashed in the
 * cmd_lookup (with a reference held) so the caller can skip it when
 * broadcasting New Settings afterwards.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1453
cmd_status_rsp(struct mgmt_pending_cmd * cmd,void * data)1454 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1455 {
1456 u8 *status = data;
1457
1458 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1459 mgmt_pending_remove(cmd);
1460 }
1461
cmd_complete_rsp(struct mgmt_pending_cmd * cmd,void * data)1462 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1463 {
1464 if (cmd->cmd_complete) {
1465 u8 *status = data;
1466
1467 cmd->cmd_complete(cmd, *status);
1468 mgmt_pending_remove(cmd);
1469
1470 return;
1471 }
1472
1473 cmd_status_rsp(cmd, data);
1474 }
1475
/* Default cmd_complete: echo the full command parameters back as the
 * response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1481
/* cmd_complete for address-based commands: reply with only the
 * mgmt_addr_info portion at the start of the stored parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1487
mgmt_bredr_support(struct hci_dev * hdev)1488 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1489 {
1490 if (!lmp_bredr_capable(hdev))
1491 return MGMT_STATUS_NOT_SUPPORTED;
1492 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1493 return MGMT_STATUS_REJECTED;
1494 else
1495 return MGMT_STATUS_SUCCESS;
1496 }
1497
mgmt_le_support(struct hci_dev * hdev)1498 static u8 mgmt_le_support(struct hci_dev *hdev)
1499 {
1500 if (!lmp_le_capable(hdev))
1501 return MGMT_STATUS_NOT_SUPPORTED;
1502 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1503 return MGMT_STATUS_REJECTED;
1504 else
1505 return MGMT_STATUS_SUCCESS;
1506 }
1507
/* Completion handler for Set Discoverable: arm the discoverable
 * timeout (if any), respond and broadcast New Settings.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Start the timeout only now that discoverable mode became
	 * active on the controller.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1541
set_discoverable_sync(struct hci_dev * hdev,void * data)1542 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1543 {
1544 BT_DBG("%s", hdev->name);
1545
1546 return hci_update_discoverable_sync(hdev);
1547 }
1548
/* MGMT_OP_SET_DISCOVERABLE handler.
 *
 * val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable; timeout is in seconds. Powered-off and timeout-only
 * updates are handled without HCI traffic; otherwise the flag change
 * is queued and finished in mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled. */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powered off: only toggle the flag, no HCI commands needed. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1681
mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1682 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1683 int err)
1684 {
1685 struct mgmt_pending_cmd *cmd = data;
1686
1687 bt_dev_dbg(hdev, "err %d", err);
1688
1689 /* Make sure cmd still outstanding. */
1690 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1691 return;
1692
1693 hci_dev_lock(hdev);
1694
1695 if (err) {
1696 u8 mgmt_err = mgmt_status(err);
1697 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1698 goto done;
1699 }
1700
1701 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1702 new_settings(hdev, cmd->sk);
1703
1704 done:
1705 if (cmd)
1706 mgmt_pending_remove(cmd);
1707
1708 hci_dev_unlock(hdev);
1709 }
1710
/* Apply Set Connectable purely on the flag level - used while the
 * controller is powered off, where no HCI traffic is needed.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also drops discoverable. */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1739
set_connectable_sync(struct hci_dev * hdev,void * data)1740 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1741 {
1742 BT_DBG("%s", hdev->name);
1743
1744 return hci_update_connectable_sync(hdev);
1745 }
1746
/* MGMT_OP_SET_CONNECTABLE handler (val: 0x00 off, 0x01 on).
 * Powered-off devices are handled purely on the flag level; otherwise
 * the change is queued and finished in
 * mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off ends discoverable mode too,
		 * so stop a pending discoverable timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1806
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag. A pure
 * flag change, but it may affect the local advertising address in
 * limited privacy mode.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1844
/* MGMT_OP_SET_LINK_SECURITY handler (BR/EDR authentication). While
 * powered off only the flag is toggled; otherwise Write Auth Enable
 * is sent and the response is delivered later via the pending
 * command.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only handling, no HCI command. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested state - just respond. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* On success we fall through with err == 0; the mgmt reply is
	 * sent once the HCI command completes.
	 */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1913
/* Completion handler for Set SSP: reconcile the SSP (and dependent
 * High Speed) flags with the outcome, answer all pending Set SSP
 * commands and broadcast New Settings when anything changed.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back the flag set optimistically in
		 * set_ssp_sync() and tell listeners about it.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		/* Disabling SSP also disables High Speed. */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1962
/* hci_cmd_sync callback for Set SSP: write the requested SSP mode to
 * the controller.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	/* Optimistically set the flag before issuing the write. */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* NOTE(review): on success the flag set above is cleared
	 * again - presumably so set_ssp_complete() observes the
	 * transition and reports it; confirm against
	 * set_ssp_complete() before changing this condition.
	 */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1980
/* MGMT_OP_SET_SSP handler (Secure Simple Pairing). While powered off
 * only the flags change; otherwise the new mode is written to the
 * controller via set_ssp_sync()/set_ssp_complete().
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only handling, no HCI command. */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also disables High Speed. */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state - just respond. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2060
/* MGMT_OP_SET_HS handler (Bluetooth High Speed). Pure flag handling:
 * enabling requires SSP; disabling is only allowed while powered off.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An in-flight Set SSP could change the flags underneath us. */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS while powered is rejected. */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2121
set_le_complete(struct hci_dev * hdev,void * data,int err)2122 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2123 {
2124 struct cmd_lookup match = { NULL, hdev };
2125 u8 status = mgmt_status(err);
2126
2127 bt_dev_dbg(hdev, "err %d", err);
2128
2129 if (status) {
2130 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2131 &status);
2132 return;
2133 }
2134
2135 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2136
2137 new_settings(hdev, match.sk);
2138
2139 if (match.sk)
2140 sock_put(match.sk);
2141 }
2142
/* hci_cmd_sync callback for Set LE: write the LE Host Supported setting
 * to the controller and keep advertising state consistent with the new
 * value.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Tear down all advertising before switching LE off */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			/* Instance 0x00 is the default instance */
			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2186
/* Completion for a queued Set Mesh Receiver command: on failure answer
 * every pending SET_MESH_RECEIVER with the translated status, otherwise
 * complete just this command.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;	/* cmd is freed before the reply */

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2202
/* hci_cmd_sync callback for Set Mesh Receiver: latch the enable flag,
 * store the requested AD type filter list and re-program passive
 * scanning accordingly. Always succeeds.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	/* Reset the filter so a short/absent list clears old entries */
	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	/* Bytes after the fixed header are the AD type list */
	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2225
/* MGMT_OP_SET_MESH_RECEIVER handler: turn mesh packet reception on or
 * off. Requires an LE capable controller with the mesh experimental
 * feature enabled; the actual work is done in set_mesh_sync().
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		/* On queueing failure the completion never runs, so the
		 * pending command must be released here.
		 */
		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2263
mesh_send_start_complete(struct hci_dev * hdev,void * data,int err)2264 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2265 {
2266 struct mgmt_mesh_tx *mesh_tx = data;
2267 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2268 unsigned long mesh_send_interval;
2269 u8 mgmt_err = mgmt_status(err);
2270
2271 /* Report any errors here, but don't report completion */
2272
2273 if (mgmt_err) {
2274 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2275 /* Send Complete Error Code for handle */
2276 mesh_send_complete(hdev, mesh_tx, false);
2277 return;
2278 }
2279
2280 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2281 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2282 mesh_send_interval);
2283 }
2284
/* hci_cmd_sync callback for Mesh Send: wrap the mesh payload in a
 * dedicated advertising instance and schedule it.
 *
 * Returns 0 or a negative errno; may also return MGMT_STATUS_BUSY (a
 * positive value) when no advertising slot is free.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	/* Mesh TX uses an instance number just past the controller's
	 * regular advertising sets.
	 */
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	/* Let the advertisement run long enough for cnt repetitions at
	 * the maximum advertising interval.
	 */
	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2338
send_count(struct mgmt_mesh_tx * mesh_tx,void * data)2339 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2340 {
2341 struct mgmt_rp_mesh_read_features *rp = data;
2342
2343 if (rp->used_handles >= rp->max_handles)
2344 return;
2345
2346 rp->handles[rp->used_handles++] = mesh_tx->handle;
2347 }
2348
/* MGMT_OP_MESH_READ_FEATURES handler: report the maximum number of
 * concurrent mesh transmissions and the TX handles currently in use by
 * the requesting socket.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* Handles are only usable while LE is enabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	/* Collect this socket's outstanding TX handles into rp.handles */
	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply to the used portion of the trailing handle
	 * array (it holds MESH_HANDLES_MAX one-byte handles).
	 */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2375
/* hci_cmd_sync callback for Mesh Send Cancel: cancel either all of the
 * requester's outstanding mesh transmissions (handle 0) or one specific
 * handle, then complete the command and free it.
 */
static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		/* Handle 0 means: cancel every TX owned by this socket */
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		/* Only the socket that queued a TX may cancel it */
		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}
2402
/* MGMT_OP_MESH_SEND_CANCEL handler: validate preconditions and queue
 * send_cancel() on the cmd_sync context, which sends the reply.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	/* NOTE: mgmt_pending_new (not _add); send_cancel() is
	 * responsible for freeing the command on success.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2436
/* MGMT_OP_MESH_SEND handler: queue a mesh packet for transmission.
 *
 * The packet (AD data after the fixed header, 1..31 bytes) is recorded
 * as a mgmt_mesh_tx entry; when no transmission is currently running,
 * mesh_send_sync() is queued to start advertising it. The assigned
 * one-byte handle is returned to the caller on success.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* Need LE enabled and a payload of 1..31 bytes of AD data */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Reuse the Read Features reply structure to count this
	 * socket's outstanding handles and enforce MESH_HANDLES_MAX.
	 */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	/* Start immediately only when no TX is in flight; otherwise the
	 * new entry waits in the queue for the current one to finish.
	 */
	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		/* Reply with the one-byte TX handle */
		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2497
/* MGMT_OP_SET_LE handler: enable or disable Low Energy support. */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* When powered off, or already in the requested state, only the
	 * flags are updated - no HCI traffic is needed.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE also disables advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against concurrent LE/advertising state changes */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		/* Completion never runs on queueing failure */
		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2586
2587 /* This is a helper function to test for pending mgmt commands that can
2588 * cause CoD or EIR HCI commands. We can only allow one such pending
2589 * mgmt command at a time since otherwise we cannot easily track what
2590 * the current values are, will be, and based on that calculate if a new
2591 * HCI command needs to be sent and if yes with what value.
2592 */
pending_eir_or_class(struct hci_dev * hdev)2593 static bool pending_eir_or_class(struct hci_dev *hdev)
2594 {
2595 struct mgmt_pending_cmd *cmd;
2596
2597 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2598 switch (cmd->opcode) {
2599 case MGMT_OP_ADD_UUID:
2600 case MGMT_OP_REMOVE_UUID:
2601 case MGMT_OP_SET_DEV_CLASS:
2602 case MGMT_OP_SET_POWERED:
2603 return true;
2604 }
2605 }
2606
2607 return false;
2608 }
2609
/* Bluetooth Base UUID (xxxxxxxx-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16/32 bit UUID aliases differ from it only
 * in the last four bytes (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2614
get_uuid_size(const u8 * uuid)2615 static u8 get_uuid_size(const u8 *uuid)
2616 {
2617 u32 val;
2618
2619 if (memcmp(uuid, bluetooth_base_uuid, 12))
2620 return 128;
2621
2622 val = get_unaligned_le32(&uuid[12]);
2623 if (val > 0xffff)
2624 return 32;
2625
2626 return 16;
2627 }
2628
mgmt_class_complete(struct hci_dev * hdev,void * data,int err)2629 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2630 {
2631 struct mgmt_pending_cmd *cmd = data;
2632
2633 bt_dev_dbg(hdev, "err %d", err);
2634
2635 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2636 mgmt_status(err), hdev->dev_class, 3);
2637
2638 mgmt_pending_free(cmd);
2639 }
2640
/* hci_cmd_sync callback for Add UUID: push the new class of device to
 * the controller, then refresh the EIR data derived from the UUID list.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2651
/* MGMT_OP_ADD_UUID handler: append a UUID to the service list and queue
 * the resulting Class of Device / EIR update.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-touching command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	/* mgmt_pending_new: freed by mgmt_class_complete() on success */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* The reply is sent from mgmt_class_complete() */
	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2697
enable_service_cache(struct hci_dev * hdev)2698 static bool enable_service_cache(struct hci_dev *hdev)
2699 {
2700 if (!hdev_is_powered(hdev))
2701 return false;
2702
2703 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2704 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2705 CACHE_TIMEOUT);
2706 return true;
2707 }
2708
2709 return false;
2710 }
2711
/* hci_cmd_sync callback for Remove UUID: write the updated class of
 * device and regenerate the EIR record.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	if (!err)
		err = hci_update_eir_sync(hdev);

	return err;
}
2722
/* MGMT_OP_REMOVE_UUID handler: remove one UUID (or all of them, when
 * the all-zero wildcard UUID is given) and queue the CoD/EIR update.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-touching command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache was armed, the actual class/EIR
		 * update is deferred to the cache timeout and the reply
		 * can be sent right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every list entry matching the requested UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	/* mgmt_pending_new: freed by mgmt_class_complete() on success */
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2790
set_class_sync(struct hci_dev * hdev,void * data)2791 static int set_class_sync(struct hci_dev *hdev, void *data)
2792 {
2793 int err = 0;
2794
2795 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2796 cancel_delayed_work_sync(&hdev->service_cache);
2797 err = hci_update_eir_sync(hdev);
2798 }
2799
2800 if (err)
2801 return err;
2802
2803 return hci_update_class_sync(hdev);
2804 }
2805
/* MGMT_OP_SET_DEV_CLASS handler: set the major/minor device class.
 *
 * Bits 0-1 of minor and bits 5-7 of major must be zero; anything else
 * is rejected as invalid parameters.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-touching command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off, just cache the values and reply; no HCI
	 * traffic is possible.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* mgmt_pending_new: freed by mgmt_class_complete() on success */
	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2857
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the whole BR/EDR link key
 * store with the supplied list (typically done by userspace at daemon
 * startup).
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Cap key_count so struct_size() below cannot exceed u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate all key types up front: any invalid entry rejects
	 * the whole load before the existing store is touched.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
		if (key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Keys on the blocked-key list must never be loaded */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2947
/* Emit a Device Unpaired event for @bdaddr/@addr_type to all mgmt
 * sockets except @skip_sk (normally the requester, which receives a
 * command response instead).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev = {
		.addr.type = addr_type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2959
/* Completion for a queued Unpair Device: broadcast Device Unpaired on
 * success (skipping the requester, which gets the command response via
 * cmd_complete), then finish and free the pending command.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
2971
unpair_device_sync(struct hci_dev * hdev,void * data)2972 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2973 {
2974 struct mgmt_pending_cmd *cmd = data;
2975 struct mgmt_cp_unpair_device *cp = cmd->param;
2976 struct hci_conn *conn;
2977
2978 if (cp->addr.type == BDADDR_BREDR)
2979 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2980 &cp->addr.bdaddr);
2981 else
2982 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2983 le_addr_type(cp->addr.type));
2984
2985 if (!conn)
2986 return 0;
2987
2988 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2989 }
2990
/* MGMT_OP_UNPAIR_DEVICE handler: delete the stored keys for a device
 * and optionally disconnect it.
 *
 * For BR/EDR the link key is removed directly; for LE any ongoing SMP
 * pairing is aborted (which removes LTK/IRK). When a disconnect was
 * requested and a connection exists, the reply is deferred until the
 * link has been torn down (see unpair_device_complete).
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			/* No stored link key: device was not paired */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop the connection parameters now */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* mgmt_pending_new: freed by unpair_device_complete() */
	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3119
/* MGMT_OP_DISCONNECT handler: initiate termination of the ACL/LE link
 * to the given address. On success the reply is sent later through the
 * pending command (generic_cmd_complete) once the link goes down; this
 * handler only starts the disconnect.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be pending per controller */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections are not established links */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3185
/* Translate an HCI link type plus address type into the single BDADDR_*
 * address type used on the management interface.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	/* Any non-LE link falls back to BR/EDR */
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return BDADDR_LE_PUBLIC;

	/* Fallback to LE Random address type */
	return BDADDR_LE_RANDOM;
}
3204
/* MGMT_OP_GET_CONNECTIONS: report the addresses of all connections
 * that have been announced to userspace (HCI_CONN_MGMT_CONNECTED),
 * excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count mgmt-visible connections to size the reply.
	 * The list cannot change underneath us since the hdev lock is
	 * held for both passes.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in addresses.  SCO/eSCO entries are written
	 * but not counted (i is not advanced), so they get overwritten
	 * by the next kept entry and filtered from the reply.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3258
send_pin_code_neg_reply(struct sock * sk,struct hci_dev * hdev,struct mgmt_cp_pin_code_neg_reply * cp)3259 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3260 struct mgmt_cp_pin_code_neg_reply *cp)
3261 {
3262 struct mgmt_pending_cmd *cmd;
3263 int err;
3264
3265 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3266 sizeof(*cp));
3267 if (!cmd)
3268 return -ENOMEM;
3269
3270 cmd->cmd_complete = addr_cmd_complete;
3271
3272 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3273 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3274 if (err < 0)
3275 mgmt_pending_remove(cmd);
3276
3277 return err;
3278 }
3279
/* MGMT_OP_PIN_CODE_REPLY: forward a userspace-provided legacy PIN code
 * to the controller.  When the pending security level is
 * BT_SECURITY_HIGH a 16-byte PIN is mandatory; shorter PINs are
 * rejected on the user's behalf with a negative reply.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN code pairing only exists on BR/EDR (ACL) links */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		/* Reject the pairing on the controller first, then report
		 * the parameter error to the caller (unless the negative
		 * reply itself failed, in which case its error wins).
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3341
/* MGMT_OP_SET_IO_CAPABILITY: record the IO capability advertised
 * during pairing.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject values outside the SMP-defined IO capability range */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = cp->io_capability;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
3364
find_pairing(struct hci_conn * conn)3365 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3366 {
3367 struct hci_dev *hdev = conn->hdev;
3368 struct mgmt_pending_cmd *cmd;
3369
3370 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3371 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3372 continue;
3373
3374 if (cmd->user_data != conn)
3375 continue;
3376
3377 return cmd;
3378 }
3379
3380 return NULL;
3381 }
3382
/* Complete a pending MGMT_OP_PAIR_DEVICE command with @status and
 * detach the mgmt layer from the connection: the confirm callbacks are
 * cleared and both the hold (hci_conn_drop) and the reference
 * (hci_conn_put) taken by pair_device() are released.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
3411
/* SMP notification that pairing finished for @conn: complete any
 * pending MGMT_OP_PAIR_DEVICE command with success or failure.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
3423
/* BR/EDR connect/security/disconnect callback used while pairing:
 * finish the pending pair command with the mapped HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	} else {
		BT_DBG("Unable to find a pending command");
	}
}
3439
/* LE connect/security/disconnect callback used while pairing.  Only
 * failures are handled here: for LE, a successful connection alone is
 * not proof that pairing finished (SMP reports success separately via
 * mgmt_smp_complete).
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	} else {
		BT_DBG("Unable to find a pending command");
	}
}
3458
/* MGMT_OP_PAIR_DEVICE: initiate pairing (dedicated bonding) with a
 * remote device, creating a BR/EDR ACL or LE connection as needed.
 * The command stays pending until pairing completes or fails; every
 * reply carries the peer address.
 *
 * Fix: hci_conn_params_add() returns NULL on allocation failure, and
 * the old code dereferenced the result unconditionally — a NULL
 * pointer dereference under memory pressure.  Bail out instead.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			/* Allocation failed: bail out rather than
			 * dereferencing a NULL params entry below.
			 */
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means somebody else already owns the
	 * connection callbacks (e.g. another pairing attempt).
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure, finish immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3589
/* MGMT_OP_CANCEL_PAIR_DEVICE: abort an in-progress pairing started by
 * MGMT_OP_PAIR_DEVICE.  The pending pair command is completed with
 * MGMT_STATUS_CANCELLED, stored keys for the peer are removed, and the
 * link is torn down when pairing was what created it.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* Without a pending pair command there is nothing to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the device currently being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3646
/* Common handler for the user confirmation / passkey reply family of
 * commands.  LE responses are routed through SMP and answered
 * immediately; for BR/EDR the HCI command @hci_op is sent and the mgmt
 * command @mgmt_op stays pending until it completes.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go through SMP, not the HCI command set,
	 * and are acknowledged right away.
	 */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3717
/* MGMT_OP_PIN_CODE_NEG_REPLY: userspace rejected a legacy PIN code
 * request.
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &req->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3729
/* MGMT_OP_USER_CONFIRM_REPLY: userspace accepted the displayed numeric
 * comparison value.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Fixed-size command: any other length is malformed */
	if (len != sizeof(*req))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &req->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3745
/* MGMT_OP_USER_CONFIRM_NEG_REPLY: userspace rejected the displayed
 * numeric comparison value.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &req->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3757
/* MGMT_OP_USER_PASSKEY_REPLY: userspace supplied the requested
 * passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &req->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, req->passkey);
}
3769
/* MGMT_OP_USER_PASSKEY_NEG_REPLY: userspace declined to provide a
 * passkey.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &req->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3781
/* If the current advertising instance carries any of the bits in
 * @flags, expire it early and schedule the next instance so the
 * affected data gets regenerated.  Always returns 0.
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv || !(adv->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv = hci_get_next_instance(hdev, adv->instance);
	if (adv)
		hci_schedule_adv_instance_sync(hdev, adv->instance, true);

	return 0;
}
3804
name_changed_sync(struct hci_dev * hdev,void * data)3805 static int name_changed_sync(struct hci_dev *hdev, void *data)
3806 {
3807 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3808 }
3809
/* hci_cmd_sync completion for MGMT_OP_SET_LOCAL_NAME: report the
 * outcome to the issuing socket and, when LE advertising is active,
 * refresh any instance that embeds the local name.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* The command may already have been cancelled/replaced; only act
	 * if this is still the pending one.
	 */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3834
/* hci_cmd_sync work for MGMT_OP_SET_LOCAL_NAME: push the new name to
 * the controller (and into EIR / scan response data as applicable).
 * Always returns 0; failures surface through the individual updates.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3850
/* MGMT_OP_SET_LOCAL_NAME: update the controller's friendly name and
 * short name.  While powered off the change is only stored locally;
 * otherwise it is pushed to the controller asynchronously through
 * set_name_sync/set_name_complete.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is stored right away; only dev_name is deferred
	 * until the controller update is queued below.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Only store the new name once queuing the update succeeded */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3913
appearance_changed_sync(struct hci_dev * hdev,void * data)3914 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3915 {
3916 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3917 }
3918
/* MGMT_OP_SET_APPEARANCE: update the LE appearance value. */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 new_appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	new_appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != new_appearance) {
		hdev->appearance = new_appearance;

		/* While advertising, expire instances that embed the
		 * appearance so they get regenerated with the new value.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3953
/* MGMT_OP_GET_PHY_CONFIGURATION: report the supported, configurable
 * and currently selected PHYs.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* memset (not an initializer) so struct padding is zeroed too;
	 * the reply is copied out to userspace.
	 */
	memset(&rp, 0, sizeof(rp));

	hci_dev_lock(hdev);
	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3974
mgmt_phy_configuration_changed(struct hci_dev * hdev,struct sock * skip)3975 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3976 {
3977 struct mgmt_ev_phy_configuration_changed ev;
3978
3979 memset(&ev, 0, sizeof(ev));
3980
3981 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3982
3983 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3984 sizeof(ev), skip);
3985 }
3986
/* hci_cmd_sync completion for MGMT_OP_SET_PHY_CONFIGURATION: map the
 * sync error or the HCI response to a mgmt status, notify the caller,
 * and broadcast the change on success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail if the command was already cancelled/replaced */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* The sync request itself succeeded; derive the status from the
	 * HCI response skb (first data byte is the HCI status code).
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
4023
set_default_phy_sync(struct hci_dev * hdev,void * data)4024 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4025 {
4026 struct mgmt_pending_cmd *cmd = data;
4027 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4028 struct hci_cp_le_set_default_phy cp_phy;
4029 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4030
4031 memset(&cp_phy, 0, sizeof(cp_phy));
4032
4033 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4034 cp_phy.all_phys |= 0x01;
4035
4036 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4037 cp_phy.all_phys |= 0x02;
4038
4039 if (selected_phys & MGMT_PHY_LE_1M_TX)
4040 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4041
4042 if (selected_phys & MGMT_PHY_LE_2M_TX)
4043 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4044
4045 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4046 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4047
4048 if (selected_phys & MGMT_PHY_LE_1M_RX)
4049 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4050
4051 if (selected_phys & MGMT_PHY_LE_2M_RX)
4052 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4053
4054 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4055 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4056
4057 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4058 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4059
4060 return 0;
4061 }
4062
/* MGMT_OP_SET_PHY_CONFIGURATION: select the BR/EDR packet types and LE
 * PHYs to use.  BR/EDR changes take effect immediately through
 * hdev->pkt_type; LE changes are sent to the controller via
 * set_default_phy_sync.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting an unsupported PHY is invalid */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* PHYs that cannot be configured must always remain selected */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do if the selection is unchanged */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate the BR/EDR PHY bits into HCI packet types.  Basic
	 * rate multi-slot bits enable the matching DH/DM types, while
	 * the EDR bits are inverted: setting an HCI_2DHx/HCI_3DHx bit in
	 * pkt_type *disables* that packet type.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only BR/EDR bits changed there is no HCI command to issue;
	 * broadcast the change (if any) and complete immediately.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4191
/* MGMT_OP_SET_BLOCKED_KEYS: replace the list of key values the kernel
 * must refuse to use or distribute.  Note that @err holds a mgmt
 * status code (not an errno) and is passed as the command-complete
 * status.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Largest key_count whose payload still fits a u16-sized command */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* This is a full replace: drop the previous list first */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			/* Keys added so far stay on the list */
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}
4240
/* MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband speech setting.
 * The value can only be changed while the controller is powered off.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Changing the value while powered is rejected */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Only emit New Settings when the flag actually flipped */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4289
/* MGMT_OP_READ_CONTROLLER_CAP: report the controller's security
 * capabilities as a sequence of EIR-style (length, type, value)
 * entries appended into rp->cap.
 *
 * Returns 0 on success or a negative error from mgmt_cmd_complete().
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* 20 bytes covers the fixed reply header plus all capability
	 * entries appended below (3 + 4 + 4 + 4 data bytes at most).
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4356
/* UUIDs identifying the experimental features exposed through
 * MGMT_OP_SET_EXP_FEATURE / MGMT_OP_READ_EXP_FEATURES_INFO.
 *
 * Each byte array stores its UUID in reversed (little-endian) byte
 * order relative to the canonical string shown in the comment above it.
 */

#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4400
/* MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental features that
 * apply to the given index (hdev == NULL means the non-controller
 * index) together with their current state.
 *
 * Each entry is a 16-byte UUID plus a 32-bit flags word where BIT(0)
 * means "enabled"; the RPA resolution entry additionally sets BIT(1)
 * to indicate that toggling it changes the supported settings.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or a negative
 * error from mgmt_cmd_complete().
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features: every conditional below can
	 * contribute at most one entry.
	 */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Core debug output toggle, only on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* BIT(1) signals "supported settings change on toggle" */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Quality reporting via driver hook or AOSP vendor extension */
	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* Use sizeof(rp->features[0]) rather than a hard-coded 20 so the
	 * reply length always matches the allocation size above.
	 */
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) +
				   (sizeof(rp->features[0]) * idx));

	kfree(rp);
	return status;
}
4505
/* Emit MGMT_EV_EXP_FEATURE_CHANGED for the LL privacy (RPA resolution)
 * feature and mirror its state into hdev->conn_flags; BIT(1) in the
 * event flags signals that the supported settings changed as well.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = BIT(1);

	if (enabled)
		flags |= BIT(0);

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	/* NOTE(review): conn_flags is updated here without explicit
	 * locking — confirm whether this needs to be atomic with readers.
	 */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
4526
/* Broadcast MGMT_EV_EXP_FEATURE_CHANGED for @uuid with BIT(0) carrying
 * the new enabled state; @skip does not receive the event.
 */
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = enabled ? BIT(0) : 0;

	memset(&ev, 0, sizeof(ev));
	ev.flags = cpu_to_le32(flags);
	memcpy(ev.uuid, uuid, 16);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
4540
/* Build one exp_features[] table entry: maps a feature UUID to the
 * handler that set_exp_feature() dispatches to.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4546
4547 /* The zero key uuid is special. Multiple exp features are set through it. */
/* MGMT_OP_SET_EXP_FEATURE handler for the all-zero UUID: acts as a
 * "disable all" request — each experimental feature currently enabled
 * for the index is switched off and a changed event is emitted.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	/* The reply always carries the zero UUID with no flags set */
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Non-controller index: turn off core debug output */
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* LL privacy can only be disabled while powered off */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4583
4584 #ifdef CONFIG_BT_FEATURE_DEBUG
/* MGMT_OP_SET_EXP_FEATURE handler for the debug UUID: toggles the core
 * debug output via bt_dbg_set().  Only valid on the non-controller
 * index (hdev must be NULL).
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	/* Report a change only if the debug state actually flipped */
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here, so the event targets the global index */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
4629 #endif
4630
/* MGMT_OP_SET_EXP_FEATURE handler for the mgmt mesh UUID: toggles the
 * HCI_MESH_EXPERIMENTAL flag; disabling it also clears HCI_MESH.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, toggled;
	int err;

	/* This feature is bound to a specific controller */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Exactly one parameter octet is expected */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* That octet must be a boolean */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	enable = !!cp->param[0];

	if (!enable) {
		/* Turning the experiment off also turns mesh itself off */
		hci_dev_clear_flag(hdev, HCI_MESH);
		toggled = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	} else {
		toggled = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (toggled)
		exp_feature_changed(hdev, mgmt_mesh_uuid, enable, sk);

	return err;
}
4681
/* MGMT_OP_SET_EXP_FEATURE handler for the RPA resolution (LL privacy)
 * UUID: toggles HCI_ENABLE_LL_PRIVACY.  BIT(1) in the reply/event flags
 * indicates that the supported settings change along with the feature.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		/* Enabling LL privacy also clears the advertising flag */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4746
/* MGMT_OP_SET_EXP_FEATURE handler for the quality report UUID: toggles
 * controller quality reporting through either the driver hook
 * (hdev->set_quality_report) or the AOSP vendor extension.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Serialize with other HCI request activity while the driver or
	 * vendor hook is invoked.
	 */
	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* The driver hook takes precedence over the AOSP path */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Update the flag only after the hardware accepted it */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4820
/* MGMT_OP_SET_EXP_FEATURE handler for the offload codecs UUID: toggles
 * HCI_OFFLOAD_CODECS_ENABLED on controllers that expose a data path
 * (hdev->get_data_path_id).
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, toggled;
	int err;

	/* A concrete controller index is required */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Exactly one parameter octet is expected */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* That octet must be a boolean */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Offloading needs a driver-provided data path */
	if (!hdev->get_data_path_id)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);

	enable = !!cp->param[0];
	toggled = (enable != hci_dev_test_flag(hdev,
					       HCI_OFFLOAD_CODECS_ENABLED));

	if (toggled) {
		if (enable)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    enable, toggled);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (toggled)
		exp_feature_changed(hdev, offload_codecs_uuid, enable, sk);

	return err;
}
4878
/* MGMT_OP_SET_EXP_FEATURE handler for the LE simultaneous roles UUID:
 * toggles HCI_LE_SIMULTANEOUS_ROLES on controllers whose LE states
 * allow it (hci_dev_le_state_simultaneous()).
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, toggled;
	int err;

	/* A concrete controller index is required */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Exactly one parameter octet is expected */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* That octet must be a boolean */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The controller must support the needed LE state combinations */
	if (!hci_dev_le_state_simultaneous(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);

	enable = !!cp->param[0];
	toggled = (enable != hci_dev_test_flag(hdev,
					       HCI_LE_SIMULTANEOUS_ROLES));

	if (toggled) {
		if (enable)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    enable, toggled);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (toggled)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, enable,
				    sk);

	return err;
}
4936
4937 #ifdef CONFIG_BT_LE
/* MGMT_OP_SET_EXP_FEATURE handler for the ISO socket UUID: registers or
 * unregisters the ISO socket protocol via iso_init()/iso_exit().  Only
 * valid on the non-controller index (hdev must be NULL).
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = cp->param[0] ? true : false;
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	/* Report a change only if init/exit actually succeeded */
	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* NOTE(review): the reply reports status 0 even when iso_init()/
	 * iso_exit() failed, and its flags reflect the requested value
	 * rather than the actual state — confirm this is intended.
	 */
	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
4986 #endif
4987
/* Dispatch table mapping experimental-feature UUIDs to their handlers;
 * scanned in order by set_exp_feature() and terminated by a NULL entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
5009
/* MGMT_OP_SET_EXP_FEATURE: dispatch the request to the handler whose
 * UUID matches; unknown UUIDs are answered with NOT_SUPPORTED.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	const struct mgmt_exp_feature *feature;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Walk the table until the NULL sentinel entry */
	for (feature = exp_features; feature->uuid; feature++) {
		if (!memcmp(cp->uuid, feature->uuid, 16))
			return feature->set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
5027
/* Return the connection flags supported for @params, masking out
 * HCI_CONN_FLAG_REMOTE_WAKEUP when the device cannot honour it.
 */
static u32 get_params_flags(struct hci_dev *hdev,
			    struct hci_conn_params *params)
{
	u32 flags = hdev->conn_flags;

	/* Devices using RPAs can only be programmed in the acceptlist if
	 * LL Privacy has been enabled, otherwise they cannot mark
	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
	 */
	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;

	return flags;
}
5043
/* MGMT_OP_GET_DEVICE_FLAGS: look up the stored connection flags for a
 * BR/EDR accept-list entry or an LE connection-parameter entry.
 * Replies with INVALID_PARAMS when the device is unknown.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	/* Dropped the trailing "\n": every other bt_dev_dbg() call in this
	 * file omits it, and keeping one here produced a blank log line.
	 */
	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		/* LE entries may support fewer flags than the controller */
		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}
5095
/* Broadcast MGMT_EV_DEVICE_FLAGS_CHANGED for @bdaddr to all mgmt
 * sockets except @sk (the originator of the change).
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev = {
		.addr.type = bdaddr_type,
		.supported_flags = cpu_to_le32(supported_flags),
		.current_flags = cpu_to_le32(current_flags),
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
5109
/* MGMT_OP_SET_DEVICE_FLAGS: store new connection flags for a BR/EDR
 * accept-list entry or an LE connection-parameter entry.  On success a
 * Device Flags Changed event is sent to the other mgmt sockets.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): conn_flags is read before hci_dev_lock() is taken
	 * below — confirm whether it can change concurrently.
	 */
	supported_flags = hdev->conn_flags;

	/* Reject any requested flag outside the supported mask */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* LE entries may support fewer flags; re-validate against them */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	/* WRITE_ONCE suggests params->flags is also read locklessly
	 * elsewhere.
	 */
	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5186
/* Broadcast MGMT_EV_ADV_MONITOR_ADDED for @handle; @sk is skipped. */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
5196
/* Broadcast MGMT_EV_ADV_MONITOR_REMOVED for @handle.  When the removal
 * was requested over mgmt, the requesting socket is skipped so it only
 * receives the command reply, not the event.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* NOTE(review): this only checks that the pending request
		 * carries a non-zero handle, not that it equals @handle —
		 * confirm whether a per-handle comparison was intended.
		 */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5216
/* MGMT_OP_READ_ADV_MONITOR_FEATURES: report the supported/enabled
 * advertisement-monitor features plus the handles of all registered
 * monitors.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or a negative
 * error from mgmt_cmd_complete().
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Defensive bound: never write past handles[] even if the idr
	 * were ever to hold more than HCI_MAX_ADV_MONITOR_NUM_HANDLES
	 * entries.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
		if (num_handles >= HCI_MAX_ADV_MONITOR_NUM_HANDLES)
			break;
		handles[num_handles++] = monitor->handle;
	}

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5265
/* Completion callback for mgmt_add_adv_patterns_monitor_sync(): sends
 * the Add Adv Patterns Monitor reply and, on success, registers the
 * monitor in the device state and refreshes passive scanning.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		/* Announce the new monitor before updating local state */
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5293
mgmt_add_adv_patterns_monitor_sync(struct hci_dev * hdev,void * data)5294 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5295 {
5296 struct mgmt_pending_cmd *cmd = data;
5297 struct adv_monitor *monitor = cmd->user_data;
5298
5299 return hci_add_adv_monitor(hdev, monitor);
5300 }
5301
/* Common tail of the Add Adv Patterns Monitor command handlers.
 *
 * Takes ownership of @m: on any failure path the monitor is freed via
 * hci_free_adv_monitor() (which also tolerates m == NULL, so callers may
 * pass a NULL monitor together with a non-zero @status).  On success the
 * monitor is handed to the hci_cmd_sync machinery and is released later
 * by the completion path.
 *
 * Returns 0 once the work is queued, or the result of mgmt_cmd_status()
 * on error.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	/* Caller already failed (bad params / parse error); just report it */
	if (status)
		goto unlock;

	/* Only one monitor (or LE state) operation may be in flight */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		/* The completion callback will never run, so drop the
		 * pending command here before the monitor is freed below;
		 * otherwise cmd would linger on the pending list with a
		 * dangling user_data and keep reporting BUSY.  This matches
		 * the error handling in remove_adv_monitor().
		 */
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5349
parse_adv_monitor_rssi(struct adv_monitor * m,struct mgmt_adv_rssi_thresholds * rssi)5350 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5351 struct mgmt_adv_rssi_thresholds *rssi)
5352 {
5353 if (rssi) {
5354 m->rssi.low_threshold = rssi->low_threshold;
5355 m->rssi.low_threshold_timeout =
5356 __le16_to_cpu(rssi->low_threshold_timeout);
5357 m->rssi.high_threshold = rssi->high_threshold;
5358 m->rssi.high_threshold_timeout =
5359 __le16_to_cpu(rssi->high_threshold_timeout);
5360 m->rssi.sampling_period = rssi->sampling_period;
5361 } else {
5362 /* Default values. These numbers are the least constricting
5363 * parameters for MSFT API to work, so it behaves as if there
5364 * are no rssi parameter to consider. May need to be changed
5365 * if other API are to be supported.
5366 */
5367 m->rssi.low_threshold = -127;
5368 m->rssi.low_threshold_timeout = 60;
5369 m->rssi.high_threshold = -127;
5370 m->rssi.high_threshold_timeout = 0;
5371 m->rssi.sampling_period = 0;
5372 }
5373 }
5374
/* Validate and copy the advertising patterns from the command payload onto
 * the monitor's pattern list.  Returns an MGMT_STATUS_* code; on failure,
 * patterns copied so far remain on the list and are released when the
 * caller frees the monitor.
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	int i;

	for (i = 0; i < pattern_count; i++) {
		struct mgmt_adv_pattern *src = &patterns[i];
		struct adv_pattern *dst;

		/* The pattern must fit inside extended advertising data */
		if (src->offset >= HCI_MAX_EXT_AD_LENGTH ||
		    src->length > HCI_MAX_EXT_AD_LENGTH ||
		    (src->offset + src->length) > HCI_MAX_EXT_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		dst = kmalloc(sizeof(*dst), GFP_KERNEL);
		if (!dst)
			return MGMT_STATUS_NO_RESOURCES;

		dst->ad_type = src->ad_type;
		dst->offset = src->offset;
		dst->length = src->length;
		memcpy(dst->value, src->value, dst->length);

		INIT_LIST_HEAD(&dst->list);
		list_add(&dst->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
5405
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler: validate the variable-length
 * command, build an adv_monitor with default RSSI parameters and hand it
 * to __add_adv_patterns_monitor() for registration.
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed-size header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* The payload must carry exactly pattern_count patterns */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* This command variant carries no RSSI thresholds; use defaults */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	/* __add_adv_patterns_monitor() takes ownership of m (it also
	 * accepts m == NULL together with an error status).
	 */
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5442
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: like
 * add_adv_patterns_monitor() but the command also carries RSSI
 * threshold parameters for the monitor.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *monitor = NULL;
	u8 status = MGMT_STATUS_SUCCESS;

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed-size header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* The payload must carry exactly pattern_count patterns */
	if (len != sizeof(*cp) +
		   cp->pattern_count * sizeof(struct mgmt_adv_pattern)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
	if (!monitor) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&monitor->patterns);

	parse_adv_monitor_rssi(monitor, &cp->rssi);
	status = parse_adv_monitor_pattern(monitor, cp->pattern_count,
					   cp->patterns);

done:
	/* __add_adv_patterns_monitor() takes ownership of monitor */
	return __add_adv_patterns_monitor(sk, hdev, monitor, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5479
/* Completion callback for MGMT_OP_REMOVE_ADV_MONITOR: echo the requested
 * handle back to the caller and refresh passive scanning on success.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Handle is already little-endian in the command parameters */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		/* Fewer monitors may mean passive scan is no longer needed */
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5502
mgmt_remove_adv_monitor_sync(struct hci_dev * hdev,void * data)5503 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5504 {
5505 struct mgmt_pending_cmd *cmd = data;
5506 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5507 u16 handle = __le16_to_cpu(cp->monitor_handle);
5508
5509 if (!handle)
5510 return hci_remove_all_adv_monitor(hdev);
5511
5512 return hci_remove_single_adv_monitor(hdev, handle);
5513 }
5514
/* MGMT_OP_REMOVE_ADV_MONITOR handler: queue removal of one monitor (or of
 * all monitors when handle is 0) on the hci_cmd_sync workqueue.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Only one monitor (or LE state) operation may be in flight */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
				 mgmt_remove_adv_monitor_complete);

	if (err) {
		/* Completion callback will never run; drop the pending cmd */
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	/* Response is sent from mgmt_remove_adv_monitor_complete() */
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5560
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_DATA: translate the HCI
 * reply skb (legacy or Secure Connections variant) into the mgmt response.
 *
 * cmd->skb may be NULL, an ERR_PTR, or a valid skb whose first byte is the
 * HCI status, depending on how read_local_oob_data_sync() fared.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Even when err is 0, the skb itself may still carry a failure */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			/* First byte of the HCI reply is the HCI status */
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: only the P-192 hash/randomizer pair */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused 256-bit fields off the response */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 pairs */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5627
read_local_oob_data_sync(struct hci_dev * hdev,void * data)5628 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5629 {
5630 struct mgmt_pending_cmd *cmd = data;
5631
5632 if (bredr_sc_enabled(hdev))
5633 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5634 else
5635 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5636
5637 if (IS_ERR(cmd->skb))
5638 return PTR_ERR(cmd->skb);
5639 else
5640 return 0;
5641 }
5642
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller; the actual HCI exchange runs on the hci_cmd_sync workqueue.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* mgmt_pending_new (not _add): the cmd is owned by the sync request
	 * and freed in read_local_oob_data_complete().
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5684
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.
 *
 * Two command sizes are accepted: the short form carries only P-192
 * hash/randomizer (BR/EDR only), the extended form carries both P-192 and
 * P-256 values.  All-zero key material means "no OOB data" for that pair.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Short form: P-192 only, valid for BR/EDR addresses */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: both P-192 and P-256 pairs */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known command size: reject */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5792
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drop stored OOB data for one
 * BR/EDR address, or for all addresses when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Wildcard address: clear every stored entry */
		hci_remote_oob_data_clear(hdev);
	} else if (hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type) < 0) {
		/* No entry for this address */
		status = MGMT_STATUS_INVALID_PARAMS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
5829
/* Finish whichever Start Discovery variant is pending (regular, service or
 * limited) by invoking its cmd_complete handler with the HCI status.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	static const u16 ops[] = {
		MGMT_OP_START_DISCOVERY,
		MGMT_OP_START_SERVICE_DISCOVERY,
		MGMT_OP_START_LIMITED_DISCOVERY,
	};
	struct mgmt_pending_cmd *cmd = NULL;
	size_t i;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* At most one of the three discovery variants can be pending */
	for (i = 0; !cmd && i < ARRAY_SIZE(ops); i++)
		cmd = pending_find(ops[i], hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
5852
discovery_type_is_valid(struct hci_dev * hdev,uint8_t type,uint8_t * mgmt_status)5853 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5854 uint8_t *mgmt_status)
5855 {
5856 switch (type) {
5857 case DISCOV_TYPE_LE:
5858 *mgmt_status = mgmt_le_support(hdev);
5859 if (*mgmt_status)
5860 return false;
5861 break;
5862 case DISCOV_TYPE_INTERLEAVED:
5863 *mgmt_status = mgmt_le_support(hdev);
5864 if (*mgmt_status)
5865 return false;
5866 fallthrough;
5867 case DISCOV_TYPE_BREDR:
5868 *mgmt_status = mgmt_bredr_support(hdev);
5869 if (*mgmt_status)
5870 return false;
5871 break;
5872 default:
5873 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5874 return false;
5875 }
5876
5877 return true;
5878 }
5879
/* hci_cmd_sync completion for the Start Discovery variants: respond to the
 * caller and move the discovery state machine forward (or back to stopped
 * on error).
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the command was already taken off the pending list
	 * (e.g. completed through another path).
	 */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* The response payload is the single type byte stored in cmd->param */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5898
/* hci_cmd_sync work function: kick off the discovery procedure */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5903
/* Shared implementation of MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which): validate state and
 * discovery type, then queue the discovery start on the sync workqueue.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry is on */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	/* On success err is 0 from hci_cmd_sync_queue(); the mgmt response
	 * is sent later from start_discovery_complete().
	 */
	return err;
}
5974
/* MGMT_OP_START_DISCOVERY handler: regular (non-limited) discovery */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5981
/* MGMT_OP_START_LIMITED_DISCOVERY handler: limited discovery variant */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5989
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery() but the
 * command additionally carries an RSSI threshold and a variable-length list
 * of 128-bit service UUIDs used to filter reported devices.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound that keeps sizeof(*cp) + uuid_count * 16 within u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry is on */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The payload must carry exactly uuid_count 128-bit UUIDs */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Copy the UUID filter list; freed via
		 * hci_discovery_filter_clear()
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6101
/* Finish a pending MGMT_OP_STOP_DISCOVERY by invoking its cmd_complete
 * handler with the HCI status.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6118
/* hci_cmd_sync completion for MGMT_OP_STOP_DISCOVERY: respond to the
 * caller and mark discovery as stopped on success.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the command was already taken off the pending list */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* The response payload is the single type byte stored in cmd->param */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6135
/* hci_cmd_sync work function: stop the running discovery procedure */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6140
/* MGMT_OP_STOP_DISCOVERY handler: the requested type must match the type
 * of the discovery currently running.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	/* Response is sent later from stop_discovery_complete() */
	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6185
/* MGMT_OP_CONFIRM_NAME handler: during discovery, mark whether the name of
 * a found device is already known (drop it from the resolve list) or still
 * needs remote name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	/* The device must be in the inquiry cache with an unknown name */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name confirmed: no resolution needed for this entry */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Name still needed: queue the entry for name resolution */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
6227
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the reject list and
 * broadcast a Device Blocked event to the other mgmt sockets on success.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0)
		status = MGMT_STATUS_FAILED;
	else
		/* Skip the event socket that issued the command */
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6263
/* MGMT_OP_UNBLOCK_DEVICE handler: drop the address from the reject list
 * and broadcast a Device Unblocked event to the other mgmt sockets.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0)
		/* Address was not on the reject list */
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		/* Skip the event socket that issued the command */
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6299
/* hci_cmd_sync work function: refresh EIR data after a Device ID change */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6304
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record
 * (source/vendor/product/version) and schedule an EIR refresh so the new
 * record is advertised.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	/* Only source values 0x0000-0x0002 are accepted */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Best-effort: the EIR update failing does not fail the command */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
6336
/* Log the outcome of re-enabling an advertising instance */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6344
/* hci_cmd_sync completion for MGMT_OP_SET_ADVERTISING: settle all pending
 * Set Advertising commands, publish the new settings and, when advertising
 * was just turned off, re-arm any configured advertising instances.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command with the status */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's LE advertising state in the mgmt flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last responder's socket */
	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6392
/* hci_cmd_sync callback for Set Advertising.
 *
 * Applies the requested state: value 0x02 means "enabled and
 * connectable". Enabling switches to advertising instance 0x00 (the
 * legacy Set Advertising instance).
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	/* Stop any pending instance timeout before changing state. */
	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6426
/* Handle the Set Advertising management command.
 *
 * Valid values: 0x00 off, 0x01 on, 0x02 on + connectable. Cases that
 * need no HCI traffic (powered off, no state change, mesh mode, active
 * LE connections or active scanning) only toggle flags and reply
 * directly; otherwise the work is queued via hci_cmd_sync_queue() with
 * set_advertising_complete() as completion.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* While advertising is paused (e.g. by other ongoing operations)
	 * no state change is accepted.
	 */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast New Settings only if a flag actually flipped. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while another Set Advertising or Set LE is in flight. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	/* On queueing failure the pending entry must be removed again. */
	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6511
/* Handle the Set Static Address management command.
 *
 * Only allowed while the controller is powered off. BDADDR_ANY clears
 * the static address; any other value must be a valid static random
 * address (not BDADDR_NONE, two most significant bits set).
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		bool is_none = !bacmp(&cp->bdaddr, BDADDR_NONE);
		/* Two most significant bits shall be set */
		bool bad_msb = (cp->bdaddr.b[5] & 0xc0) != 0xc0;

		if (is_none || bad_msb)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err >= 0)
		err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
	return err;
}
6555
/* Handle the Set Scan Parameters management command.
 *
 * Stores the LE scan interval and window after range validation and
 * restarts a running background scan so the new values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);
	window = __le16_to_cpu(cp->window);

	/* Both values must lie within 0x0004-0x4000 and the scan window
	 * may not exceed the scan interval.
	 */
	if (interval < 0x0004 || interval > 0x4000 ||
	    window < 0x0004 || window > 0x4000 ||
	    window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6604
fast_connectable_complete(struct hci_dev * hdev,void * data,int err)6605 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6606 {
6607 struct mgmt_pending_cmd *cmd = data;
6608
6609 bt_dev_dbg(hdev, "err %d", err);
6610
6611 if (err) {
6612 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6613 mgmt_status(err));
6614 } else {
6615 struct mgmt_mode *cp = cmd->param;
6616
6617 if (cp->val)
6618 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6619 else
6620 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6621
6622 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6623 new_settings(hdev, cmd->sk);
6624 }
6625
6626 mgmt_pending_free(cmd);
6627 }
6628
write_fast_connectable_sync(struct hci_dev * hdev,void * data)6629 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6630 {
6631 struct mgmt_pending_cmd *cmd = data;
6632 struct mgmt_mode *cp = cmd->param;
6633
6634 return hci_write_fast_connectable_sync(hdev, cp->val);
6635 }
6636
/* Handle the Set Fast Connectable management command.
 *
 * Requires BR/EDR enabled and controller version >= 1.2. If the flag
 * already matches, or the controller is powered off, reply directly;
 * otherwise queue the HCI write with fast_connectable_complete() as
 * completion.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just acknowledge the current settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: only toggle the flag, no HCI traffic possible. */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6692
/* Completion callback for Set BR/EDR.
 *
 * set_bredr() flips HCI_BREDR_ENABLED before queueing the HCI work, so
 * on failure the flag must be restored here before sending the error.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6715
set_bredr_sync(struct hci_dev * hdev,void * data)6716 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6717 {
6718 int status;
6719
6720 status = hci_write_fast_connectable_sync(hdev, false);
6721
6722 if (!status)
6723 status = hci_update_scan_sync(hdev);
6724
6725 /* Since only the advertising data flags will change, there
6726 * is no need to update the scan response data.
6727 */
6728 if (!status)
6729 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6730
6731 return status;
6732 }
6733
/* Handle the Set BR/EDR management command.
 *
 * Requires a dual-mode (BR/EDR + LE) controller with LE enabled. While
 * powered off only flags are toggled; while powered on, disabling
 * BR/EDR is rejected and re-enabling is rejected for configurations
 * that would be invalid (static address or secure connections active).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: acknowledge the current settings. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all the BR/EDR-only
		 * settings that depend on it.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6834
set_secure_conn_complete(struct hci_dev * hdev,void * data,int err)6835 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6836 {
6837 struct mgmt_pending_cmd *cmd = data;
6838 struct mgmt_mode *cp;
6839
6840 bt_dev_dbg(hdev, "err %d", err);
6841
6842 if (err) {
6843 u8 mgmt_err = mgmt_status(err);
6844
6845 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6846 goto done;
6847 }
6848
6849 cp = cmd->param;
6850
6851 switch (cp->val) {
6852 case 0x00:
6853 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6854 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6855 break;
6856 case 0x01:
6857 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6858 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6859 break;
6860 case 0x02:
6861 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6862 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6863 break;
6864 }
6865
6866 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6867 new_settings(hdev, cmd->sk);
6868
6869 done:
6870 mgmt_pending_free(cmd);
6871 }
6872
/* hci_cmd_sync callback for Set Secure Connections: write SC support
 * to the controller with the requested value.
 */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}
6884
/* Handle the Set Secure Connections management command.
 *
 * Valid values: 0x00 off, 0x01 on, 0x02 SC-only mode. When the
 * controller cannot act on the change (powered off, no SC support or
 * BR/EDR disabled) only flags are toggled; otherwise the HCI write is
 * queued with set_secure_conn_complete() as completion.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC needs either controller support or LE (SMP-based SC). */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* With BR/EDR enabled on an SC-capable controller, SSP must be
	 * enabled first.
	 */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only path: no HCI traffic possible or needed. */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change requested: acknowledge the current settings. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6965
/* Handle the Set Debug Keys management command.
 *
 * Value 0x00 disables keeping debug keys, 0x01 keeps them, 0x02
 * additionally makes the controller use debug keys (SSP debug mode).
 * If the SSP debug mode usage changed on a powered, SSP-enabled
 * controller, the new mode is written to the controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	changed = cp->val ?
		!hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS) :
		hci_dev_test_and_clear_flag(hdev, HCI_KEEP_DEBUG_KEYS);

	use_changed = (cp->val == 0x02) ?
		!hci_dev_test_and_set_flag(hdev, HCI_USE_DEBUG_KEYS) :
		hci_dev_test_and_clear_flag(hdev, HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;

		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err >= 0 && changed)
		err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
	return err;
}
7012
/* Handle the Set Privacy management command.
 *
 * Only allowed while powered off. Value 0x00 disables privacy and
 * wipes the IRK, 0x01 enables it with the supplied IRK, 0x02 enables
 * limited privacy.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark the RPA expired so a fresh one gets generated. */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7069
irk_is_valid(struct mgmt_irk_info * irk)7070 static bool irk_is_valid(struct mgmt_irk_info *irk)
7071 {
7072 switch (irk->addr.type) {
7073 case BDADDR_LE_PUBLIC:
7074 return true;
7075
7076 case BDADDR_LE_RANDOM:
7077 /* Two most significant bits shall be set */
7078 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7079 return false;
7080 return true;
7081 }
7082
7083 return false;
7084 }
7085
/* Handle the Load IRKs management command.
 *
 * Validates the count/length and every entry, then replaces the entire
 * stored IRK list with the supplied one (entries matching blocked keys
 * are skipped).
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Largest count that can still fit in a u16-sized message. */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count. */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries before touching the stored list. */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type = le_addr_type(irk->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (irk->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* User space handed us IRKs, so it can resolve RPAs as well. */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7161
ltk_is_valid(struct mgmt_ltk_info * key)7162 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7163 {
7164 if (key->initiator != 0x00 && key->initiator != 0x01)
7165 return false;
7166
7167 switch (key->addr.type) {
7168 case BDADDR_LE_PUBLIC:
7169 return true;
7170
7171 case BDADDR_LE_RANDOM:
7172 /* Two most significant bits shall be set */
7173 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7174 return false;
7175 return true;
7176 }
7177
7178 return false;
7179 }
7180
/* Handle the Load Long Term Keys management command.
 *
 * Validates the count/length and every entry, then replaces the stored
 * LTK list. Blocked keys, unknown key types and P256 debug keys are
 * skipped (debug keys fall through into the default case and are never
 * stored).
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Largest count that can still fit in a u16-sized message. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count. */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate all entries before touching the stored list. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;
		u8 addr_type = le_addr_type(key->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type onto the SMP key type and
		 * authentication level.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys fall into the default case below and
			 * are deliberately not stored.
			 */
			fallthrough;
		default:
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (key->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    addr_type, type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7281
/* Completion callback for Get Connection Information.
 *
 * On success the values refreshed by get_conn_info_sync() are taken
 * from the connection; on failure invalid sentinel values are reported.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	/* Set by get_conn_info_sync() before issuing the HCI reads. */
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo the requested address back in the response. */
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}
7310
/* hci_cmd_sync callback for Get Connection Information.
 *
 * Re-validates the connection, then refreshes RSSI and - where still
 * unknown - TX power values. The connection is stashed in
 * cmd->user_data for get_conn_info_complete().
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16 handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7348
/* Handle the Get Connection Information management command.
 *
 * If the cached RSSI/TX power values are recent enough they are
 * returned directly; otherwise a refresh is queued via
 * hci_cmd_sync_queue() and the response is sent from
 * get_conn_info_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the response with the requested address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7439
get_clock_info_complete(struct hci_dev * hdev,void * data,int err)7440 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7441 {
7442 struct mgmt_pending_cmd *cmd = data;
7443 struct mgmt_cp_get_clock_info *cp = cmd->param;
7444 struct mgmt_rp_get_clock_info rp;
7445 struct hci_conn *conn = cmd->user_data;
7446 u8 status = mgmt_status(err);
7447
7448 bt_dev_dbg(hdev, "err %d", err);
7449
7450 memset(&rp, 0, sizeof(rp));
7451 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7452 rp.addr.type = cp->addr.type;
7453
7454 if (err)
7455 goto complete;
7456
7457 rp.local_clock = cpu_to_le32(hdev->clock);
7458
7459 if (conn) {
7460 rp.piconet_clock = cpu_to_le32(conn->clock);
7461 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7462 }
7463
7464 complete:
7465 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7466 sizeof(rp));
7467
7468 mgmt_pending_free(cmd);
7469 }
7470
/* Runs on the hci_sync command queue for MGMT_OP_GET_CLOCK_INFO. Issues up
 * to two HCI Read Clock commands: one for the local clock and, if the
 * target connection still exists, one for its piconet clock.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* First read with handle 0 / which 0 fetches the local clock (the
	 * completion handler reports it via hdev->clock).
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Remember the connection so the completion callback can read its
	 * clock and accuracy values.
	 */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	/* Second read targets the connection's piconet clock. */
	return hci_read_clock_sync(hdev, &hci_cp);
}
7492
/* MGMT_OP_GET_CLOCK_INFO handler. Validates the request, then defers the
 * actual clock reads to the hci_sync queue; the reply is sent from
 * get_clock_info_complete() unless queuing fails.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the reply with the requested address so error responses
	 * still identify the device.
	 */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR addresses. */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-wildcard address asks for the piconet clock of a specific
	 * connection, which must exist and be established.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	/* Queue the clock reads; on queuing failure send an immediate
	 * FAILED response and free the pending command.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7556
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)7557 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7558 {
7559 struct hci_conn *conn;
7560
7561 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7562 if (!conn)
7563 return false;
7564
7565 if (conn->dst_type != type)
7566 return false;
7567
7568 if (conn->state != BT_CONNECTED)
7569 return false;
7570
7571 return true;
7572 }
7573
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Creates the params entry if it does not exist yet. */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do when the policy is unchanged. */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Take the entry off whichever pending list it currently sits on
	 * before re-filing it according to the new policy below.
	 */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected. */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7618
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	/* Emit a Device Added mgmt event for this address/action pair. */
	struct mgmt_ev_device_added ev = {
		.addr.type = type,
		.action = action,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
7630
/* Runs on the hci_sync queue after Add Device: re-evaluate passive
 * scanning so the updated device lists take effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7635
/* MGMT_OP_ADD_DEVICE handler. For BR/EDR addresses the device goes on the
 * accept list (action 0x01 only); for LE addresses the action selects the
 * auto-connect policy (0x00 report, 0x01 direct, 0x02 always).
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject invalid address types and the wildcard address. */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only actions 0x00-0x02 are defined. */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Re-evaluate scanning now that the accept list changed. */
		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy. */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	/* Kick passive scanning so the new LE entry takes effect. */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7737
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	/* Emit a Device Removed mgmt event for this address. */
	struct mgmt_ev_device_removed ev = {
		.addr.type = type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
7748
/* Runs on the hci_sync queue after Remove Device: re-evaluate passive
 * scanning so the updated device lists take effect.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7753
/* MGMT_OP_REMOVE_DEVICE handler. Removes one device (accept-list entry for
 * BR/EDR, conn params for LE) or, when BDADDR_ANY is given, clears all
 * accept-list entries and all non-disabled LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			/* For BR/EDR, drop the accept-list entry; a lookup
			 * failure maps to INVALID_PARAMS (unknown device).
			 */
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* NOTE(review): disabled/explicit-connect entries are
		 * rejected here — presumably because they were not added
		 * through Add Device; confirm against mgmt-api.txt.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 (BR/EDR). */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		/* Disabled entries are kept; explicit-connect entries are
		 * downgraded instead of freed so the in-progress connect
		 * attempt survives.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7877
/* MGMT_OP_LOAD_CONN_PARAM handler. Drops disabled conn-param entries and
 * then adds/updates an entry for each supplied parameter set. Individual
 * entries with an invalid address type or out-of-range values are logged
 * and skipped rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps the total payload within a u16 length. */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length. */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE address types make sense here. */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7962
/* MGMT_OP_SET_EXTERNAL_CONFIG handler. Toggles HCI_EXT_CONFIGURED on
 * controllers with the EXTERNAL_CONFIG quirk; only allowed while the
 * controller is powered off.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	/* config is a boolean: only 0x00 and 0x01 are valid. */
	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed is true only when the flag actually flipped. */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* When the configured state no longer matches the HCI_UNCONFIGURED
	 * flag, the controller has to move between the configured and
	 * unconfigured index lists: drop the old index, flip the flag, and
	 * either power-cycle into the configured state or re-register as a
	 * raw unconfigured device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8018
/* MGMT_OP_SET_PUBLIC_ADDRESS handler. Stores a public address for drivers
 * that provide a set_bdaddr callback; only allowed while powered off.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* The wildcard address cannot be a valid public address. */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only meaningful when the driver can program the address. */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If the new address makes the controller fully configured,
	 * re-register it as a configured index and power it on.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8070
/* Completion handler for the HCI Read Local OOB (Extended) Data request
 * issued by read_local_ssp_oob_req(). Packs the hash/randomizer values
 * from the controller reply (cmd->skb) into EIR format and completes the
 * pending MGMT_OP_READ_LOCAL_OOB_EXT_DATA command.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if this command is no longer the pending one. */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* Derive the status from the skb when the sync request itself
	 * succeeded: missing skb, error pointer, or the HCI status byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy (non-Secure-Connections) reply: P-192 values only.
		 * eir_len: class-of-dev field (5) + 16-byte hash and rand
		 * fields (18 each, incl. 2-byte headers).
		 */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended reply: P-256 values always, P-192 only when the
		 * controller is not in SC-only mode.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Also broadcast the new OOB data to interested listeners. */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8193
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)8194 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8195 struct mgmt_cp_read_local_oob_ext_data *cp)
8196 {
8197 struct mgmt_pending_cmd *cmd;
8198 int err;
8199
8200 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8201 cp, sizeof(*cp));
8202 if (!cmd)
8203 return -ENOMEM;
8204
8205 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8206 read_local_oob_ext_data_complete);
8207
8208 if (err < 0) {
8209 mgmt_pending_remove(cmd);
8210 return err;
8211 }
8212
8213 return 0;
8214 }
8215
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler. For BR/EDR with SSP enabled the
 * hash/randomizer values come from the controller (asynchronously, via
 * read_local_ssp_oob_req); for LE the EIR data (address, role, optional SC
 * confirm/random, flags) is assembled here directly.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Determine status and worst-case eir_len for the reply buffer. */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Reply is deferred to the completion handler;
			 * nothing more to do here on success.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] encodes the LE address type: 0x01 random (static
		 * address), 0x00 public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role: 0x02 when advertising, 0x01 otherwise. */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8376
get_supported_adv_flags(struct hci_dev * hdev)8377 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8378 {
8379 u32 flags = 0;
8380
8381 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8382 flags |= MGMT_ADV_FLAG_DISCOV;
8383 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8384 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8385 flags |= MGMT_ADV_FLAG_APPEARANCE;
8386 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8387 flags |= MGMT_ADV_PARAM_DURATION;
8388 flags |= MGMT_ADV_PARAM_TIMEOUT;
8389 flags |= MGMT_ADV_PARAM_INTERVALS;
8390 flags |= MGMT_ADV_PARAM_TX_POWER;
8391 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8392
8393 /* In extended adv TX_POWER returned from Set Adv Param
8394 * will be always valid.
8395 */
8396 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8397 flags |= MGMT_ADV_FLAG_TX_POWER;
8398
8399 if (ext_adv_capable(hdev)) {
8400 flags |= MGMT_ADV_FLAG_SEC_1M;
8401 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8402 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8403
8404 if (le_2m_capable(hdev))
8405 flags |= MGMT_ADV_FLAG_SEC_2M;
8406
8407 if (le_coded_capable(hdev))
8408 flags |= MGMT_ADV_FLAG_SEC_CODED;
8409 }
8410
8411 return flags;
8412 }
8413
/* MGMT_OP_READ_ADV_FEATURES handler. Reports the supported advertising
 * flags, data-length limits, and the list of current advertising
 * instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per advertising instance. */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink both the reported count
			 * and the reply length accordingly.
			 */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8468
/* Number of bytes eir_append_local_name() adds for the local name: the
 * name is appended at offset 0 into a scratch buffer, so the returned
 * offset equals the appended length.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	/* Scratch buffer sized for the short name plus field overhead. */
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];

	return eir_append_local_name(hdev, buf, 0);
}
8475
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)8476 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8477 bool is_adv_data)
8478 {
8479 u8 max_len = max_adv_len(hdev);
8480
8481 if (is_adv_data) {
8482 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8483 MGMT_ADV_FLAG_LIMITED_DISCOV |
8484 MGMT_ADV_FLAG_MANAGED_FLAGS))
8485 max_len -= 3;
8486
8487 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8488 max_len -= 3;
8489 } else {
8490 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8491 max_len -= calculate_name_len(hdev);
8492
8493 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8494 max_len -= 4;
8495 }
8496
8497 return max_len;
8498 }
8499
flags_managed(u32 adv_flags)8500 static bool flags_managed(u32 adv_flags)
8501 {
8502 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8503 MGMT_ADV_FLAG_LIMITED_DISCOV |
8504 MGMT_ADV_FLAG_MANAGED_FLAGS);
8505 }
8506
tx_power_managed(u32 adv_flags)8507 static bool tx_power_managed(u32 adv_flags)
8508 {
8509 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8510 }
8511
name_managed(u32 adv_flags)8512 static bool name_managed(u32 adv_flags)
8513 {
8514 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8515 }
8516
appearance_managed(u32 adv_flags)8517 static bool appearance_managed(u32 adv_flags)
8518 {
8519 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8520 }
8521
/* Validate user-supplied advertising / scan-response data: it must fit
 * within the controller limit (minus kernel-reserved fields) and be a
 * well-formed length-type-value sequence that does not contain fields the
 * kernel manages itself via adv_flags.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		/* A zero length byte is a padding/terminator entry. */
		if (!cur_len)
			continue;

		/* Reject a field whose declared length runs past the end of
		 * the buffer before touching data[i + 1]: for a truncated
		 * final field even the type byte may lie beyond len.
		 */
		if (i + cur_len >= len)
			return false;

		/* Fields the kernel generates itself must not appear in the
		 * user-supplied data.
		 */
		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;
	}

	return true;
}
8566
requested_adv_flags_are_valid(struct hci_dev * hdev,u32 adv_flags)8567 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8568 {
8569 u32 supported_flags, phy_flags;
8570
8571 /* The current implementation only supports a subset of the specified
8572 * flags. Also need to check mutual exclusiveness of sec flags.
8573 */
8574 supported_flags = get_supported_adv_flags(hdev);
8575 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8576 if (adv_flags & ~supported_flags ||
8577 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8578 return false;
8579
8580 return true;
8581 }
8582
/* Advertising commands are treated as busy while a Set LE command is
 * still pending on this controller.
 */
static bool adv_busy(struct hci_dev *hdev)
{
	return pending_find(MGMT_OP_SET_LE, hdev);
}
8587
/* Shared completion step for Add Advertising / Add Ext Adv Data.
 *
 * On success (err == 0) clear the pending flag on every instance that was
 * waiting for this operation.  On failure remove each still-pending
 * instance, cancelling the rotation timeout if it was the current one,
 * and emit Advertising Removed to userspace.
 *
 * NOTE(review): the 'instance' parameter is unused; the loop derives the
 * instance from each pending entry, and the inner local of the same name
 * shadows the parameter.
 */
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		instance = adv->instance;

		/* Stop the software rotation timer before removing the
		 * instance it is currently driving.
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);
}
8619
add_advertising_complete(struct hci_dev * hdev,void * data,int err)8620 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8621 {
8622 struct mgmt_pending_cmd *cmd = data;
8623 struct mgmt_cp_add_advertising *cp = cmd->param;
8624 struct mgmt_rp_add_advertising rp;
8625
8626 memset(&rp, 0, sizeof(rp));
8627
8628 rp.instance = cp->instance;
8629
8630 if (err)
8631 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8632 mgmt_status(err));
8633 else
8634 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8635 mgmt_status(err), &rp, sizeof(rp));
8636
8637 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8638
8639 mgmt_pending_free(cmd);
8640 }
8641
/* hci_cmd_sync work item: program and schedule the advertising instance
 * selected by add_advertising() (cp->instance was rewritten to the
 * instance chosen for scheduling before queueing).
 */
static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}
8649
/* MGMT_OP_ADD_ADVERTISING handler.
 *
 * Validates the request (LE support, instance range, variable-length
 * payload size, flags, TLV contents), registers or updates the
 * advertising instance, and — when HCI traffic is actually needed —
 * queues add_advertising_sync() with add_advertising_complete() as the
 * completion callback.  Otherwise replies immediately.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1-based and bounded by the controller's
	 * number of advertising sets.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The payload must exactly cover adv data plus scan response */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout requires a powered controller to ever expire */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Hand the instance actually being scheduled to the sync callback */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8784
/* Completion callback for MGMT_OP_ADD_EXT_ADV_PARAMS.
 *
 * On success, reply with the selected TX power and the space available
 * for adv/scan-rsp data under the requested flags.  On failure, remove
 * the instance again — emitting Advertising Removed if it had already
 * been advertising (!adv->pending) — and report the error status.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	/* NOTE(review): cmd is 'data' and appears to always be non-NULL
	 * here; the check looks purely defensive.
	 */
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8835
/* hci_cmd_sync work item: push the extended advertising parameters for
 * the instance registered by add_ext_adv_params() to the controller.
 */
static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}
8843
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler.
 *
 * First half of the two-step extended advertising registration: create
 * the instance with parameters only (no adv/scan-rsp data yet).  If the
 * controller supports extended advertising, the parameters are pushed
 * asynchronously and the reply is sent from the completion callback;
 * otherwise the reply is sent immediately and the data step will drive
 * the legacy advertising path.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instance numbers are 1-based and bounded by the controller */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Undo the registration done above */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8959
add_ext_adv_data_complete(struct hci_dev * hdev,void * data,int err)8960 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8961 {
8962 struct mgmt_pending_cmd *cmd = data;
8963 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8964 struct mgmt_rp_add_advertising rp;
8965
8966 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8967
8968 memset(&rp, 0, sizeof(rp));
8969
8970 rp.instance = cp->instance;
8971
8972 if (err)
8973 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8974 mgmt_status(err));
8975 else
8976 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8977 mgmt_status(err), &rp, sizeof(rp));
8978
8979 mgmt_pending_free(cmd);
8980 }
8981
add_ext_adv_data_sync(struct hci_dev * hdev,void * data)8982 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8983 {
8984 struct mgmt_pending_cmd *cmd = data;
8985 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8986 int err;
8987
8988 if (ext_adv_capable(hdev)) {
8989 err = hci_update_adv_data_sync(hdev, cp->instance);
8990 if (err)
8991 return err;
8992
8993 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8994 if (err)
8995 return err;
8996
8997 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8998 }
8999
9000 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9001 }
9002
/* MGMT_OP_ADD_EXT_ADV_DATA handler.
 *
 * Second half of the two-step registration: attach adv and scan-rsp data
 * to an instance previously created via Add Ext Adv Params.  Validation
 * failures after the instance lookup remove the instance again
 * (clear_new_instance label), mirroring the single-call Add Advertising
 * semantics.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	/* The params step must have created the instance first */
	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9121
remove_advertising_complete(struct hci_dev * hdev,void * data,int err)9122 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9123 int err)
9124 {
9125 struct mgmt_pending_cmd *cmd = data;
9126 struct mgmt_cp_remove_advertising *cp = cmd->param;
9127 struct mgmt_rp_remove_advertising rp;
9128
9129 bt_dev_dbg(hdev, "err %d", err);
9130
9131 memset(&rp, 0, sizeof(rp));
9132 rp.instance = cp->instance;
9133
9134 if (err)
9135 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9136 mgmt_status(err));
9137 else
9138 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9139 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9140
9141 mgmt_pending_free(cmd);
9142 }
9143
remove_advertising_sync(struct hci_dev * hdev,void * data)9144 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9145 {
9146 struct mgmt_pending_cmd *cmd = data;
9147 struct mgmt_cp_remove_advertising *cp = cmd->param;
9148 int err;
9149
9150 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9151 if (err)
9152 return err;
9153
9154 if (list_empty(&hdev->adv_instances))
9155 err = hci_disable_advertising_sync(hdev);
9156
9157 return err;
9158 }
9159
/* MGMT_OP_REMOVE_ADVERTISING handler.
 *
 * cp->instance == 0 means "remove all instances"; a nonzero instance
 * must exist.  The actual removal happens asynchronously via
 * remove_advertising_sync() with remove_advertising_complete() as
 * callback.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A specific instance was named but does not exist */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered, so nothing to remove */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9207
/* MGMT_OP_GET_ADV_SIZE_INFO handler: report how many bytes of adv data
 * and scan-rsp data are available for an instance under the given flags.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* Only a subset of the specified flags is implemented */
	if (flags & ~get_supported_adv_flags(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
9243
/* Dispatch table for mgmt commands: the array index corresponds to the
 * mgmt opcode (entry 0 is unused).  Each entry carries the handler, the
 * fixed (or minimum, with HCI_MGMT_VAR_LEN) parameter size, and flags
 * such as HCI_MGMT_NO_HDEV / HCI_MGMT_UNTRUSTED / HCI_MGMT_UNCONFIGURED.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9377
/* Announce a newly registered controller to mgmt listeners.
 *
 * Legacy Index Added (or Unconf Index Added) events go to sockets using
 * the old API; the Extended Index Added event additionally carries a
 * type byte: 0x00 configured primary, 0x01 unconfigured primary,
 * 0x02 AMP.  Raw-only devices are never exposed via mgmt.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		/* Unknown device types are not announced at all */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
9409
/* Announce controller removal to mgmt listeners.
 *
 * For primary controllers all still-pending mgmt commands are completed
 * with INVALID_INDEX first.  The Extended Index Removed event uses the
 * same type byte encoding as mgmt_index_added().  Finally, any delayed
 * work that only makes sense while the index exists is cancelled, but
 * only for devices managed through the mgmt interface.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Fail every command still pending on this index */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}
9451
/* Called when a power-on attempt finishes (err == 0 on success).
 *
 * On success, re-arm LE auto-connect actions and passive scanning; in
 * all cases complete any pending Set Powered commands and broadcast the
 * resulting settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* Answer every pending Set Powered command; 'match' collects the
	 * socket to exclude from the broadcast below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9474
/* Power-off bookkeeping: complete pending Set Powered commands, fail all
 * other pending commands with an appropriate status, announce a zeroed
 * class of device if one was set, and broadcast the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* A powered-off controller has no class of device; only notify if
	 * it was previously nonzero.
	 */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9508
mgmt_set_powered_failed(struct hci_dev * hdev,int err)9509 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9510 {
9511 struct mgmt_pending_cmd *cmd;
9512 u8 status;
9513
9514 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9515 if (!cmd)
9516 return;
9517
9518 if (err == -ERFKILL)
9519 status = MGMT_STATUS_RFKILLED;
9520 else
9521 status = MGMT_STATUS_FAILED;
9522
9523 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9524
9525 mgmt_pending_remove(cmd);
9526 }
9527
/* Emit MGMT_EV_NEW_LINK_KEY so userspace can (optionally) store a newly
 * created BR/EDR link key. @persistent becomes the event's store hint.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	/* Zero the whole event first so no uninitialized stack bytes
	 * are sent to userspace.
	 */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
9544
mgmt_ltk_type(struct smp_ltk * ltk)9545 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9546 {
9547 switch (ltk->type) {
9548 case SMP_LTK:
9549 case SMP_LTK_RESPONDER:
9550 if (ltk->authenticated)
9551 return MGMT_LTK_AUTHENTICATED;
9552 return MGMT_LTK_UNAUTHENTICATED;
9553 case SMP_LTK_P256:
9554 if (ltk->authenticated)
9555 return MGMT_LTK_P256_AUTH;
9556 return MGMT_LTK_P256_UNAUTH;
9557 case SMP_LTK_P256_DEBUG:
9558 return MGMT_LTK_P256_DEBUG;
9559 }
9560
9561 return MGMT_LTK_UNAUTHENTICATED;
9562 }
9563
/* Emit MGMT_EV_NEW_LONG_TERM_KEY so userspace can (optionally) store a
 * newly distributed LE long term key.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	/* Zero-fill so no uninitialized stack bytes reach userspace */
	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK (as opposed to SMP_LTK_RESPONDER) marks the key that
	 * belongs to the initiator role.
	 */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9606
/* Emit MGMT_EV_NEW_IRK so userspace can (optionally) store a newly
 * distributed identity resolving key, together with the resolvable
 * address it was received from.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	/* Zero-fill so no uninitialized stack bytes reach userspace */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
9622
/* Emit MGMT_EV_NEW_CSRK so userspace can (optionally) store a newly
 * distributed signature resolving key.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	/* Zero-fill so no uninitialized stack bytes reach userspace */
	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9652
/* Emit MGMT_EV_NEW_CONN_PARAM so userspace can (optionally) store the
 * connection parameters a peer requested. Non-identity addresses are
 * skipped entirely, since they cannot be matched again later.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	/* All parameters are little-endian on the wire */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9673
/* Emit MGMT_EV_DEVICE_CONNECTED for a new connection, attaching either
 * the cached LE advertising data or (for BR/EDR) the remote name and
 * class of device as EIR fields.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	/* allocate buff for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	/* mgmt_alloc_skb() can fail under memory pressure; bail out
	 * instead of dereferencing a NULL skb in skb_put() below. The
	 * sibling mesh_device_found()/mgmt_device_found() paths already
	 * perform the same check.
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		/* Only append the class of device if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}
9720
disconnect_rsp(struct mgmt_pending_cmd * cmd,void * data)9721 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9722 {
9723 struct sock **sk = data;
9724
9725 cmd->cmd_complete(cmd, 0);
9726
9727 *sk = cmd->sk;
9728 sock_hold(*sk);
9729
9730 mgmt_pending_remove(cmd);
9731 }
9732
unpair_device_rsp(struct mgmt_pending_cmd * cmd,void * data)9733 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9734 {
9735 struct hci_dev *hdev = data;
9736 struct mgmt_cp_unpair_device *cp = cmd->param;
9737
9738 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9739
9740 cmd->cmd_complete(cmd, 0);
9741 mgmt_pending_remove(cmd);
9742 }
9743
mgmt_powering_down(struct hci_dev * hdev)9744 bool mgmt_powering_down(struct hci_dev *hdev)
9745 {
9746 struct mgmt_pending_cmd *cmd;
9747 struct mgmt_mode *cp;
9748
9749 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9750 if (!cmd)
9751 return false;
9752
9753 cp = cmd->param;
9754 if (!cp->val)
9755 return true;
9756
9757 return false;
9758 }
9759
/* Handle a completed disconnect: emit MGMT_EV_DEVICE_DISCONNECTED and
 * finish any pending Disconnect/Unpair Device commands. Also schedules
 * the final power-off work if this was the last connection during an
 * mgmt-initiated power down.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report devices that were announced as connected via mgmt */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete any pending Disconnect commands; the first requester's
	 * socket is returned (referenced) in sk and skipped below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9799
/* Handle a failed HCI disconnect: answer any pending Unpair Device
 * commands, then complete a pending Disconnect command with the
 * translated HCI status — but only if it targeted this exact address.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Ignore failures for a different address or address type */
	if (bacmp(bdaddr, &cp->addr.bdaddr) || cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9825
/* Emit MGMT_EV_CONNECT_FAILED with the translated HCI status. Also
 * schedules the final power-off work if this was the last connection
 * during an mgmt-initiated power down.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9845
/* Emit MGMT_EV_PIN_CODE_REQUEST asking userspace for a PIN code for a
 * BR/EDR pairing; @secure is forwarded unchanged as the event's secure
 * flag.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
9856
/* Complete a pending PIN Code Reply command with the translated HCI
 * status. Does nothing if no such command is pending.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9869
/* Complete a pending PIN Code Negative Reply command with the
 * translated HCI status. Does nothing if no such command is pending.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9882
/* Emit MGMT_EV_USER_CONFIRM_REQUEST asking userspace to confirm the
 * displayed @value (numeric comparison). @confirm_hint is forwarded
 * unchanged to the event.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9899
/* Emit MGMT_EV_USER_PASSKEY_REQUEST asking userspace to provide a
 * passkey for the pairing with the given device.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9913
/* Complete a pending user-pairing response command identified by
 * @opcode with the translated HCI status. Returns -ENOENT when no
 * matching command is pending, 0 otherwise.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
9929
/* Completion handler for MGMT_OP_USER_CONFIRM_REPLY */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9936
/* Completion handler for MGMT_OP_USER_CONFIRM_NEG_REPLY */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9944
/* Completion handler for MGMT_OP_USER_PASSKEY_REPLY */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9951
/* Completion handler for MGMT_OP_USER_PASSKEY_NEG_REPLY */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9959
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display @passkey to the
 * user; @entered reports how many digits the remote side has typed so
 * far (forwarded unchanged).
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
9975
/* Emit MGMT_EV_AUTH_FAILED for a failed authentication. If a pairing
 * command is pending for this connection, it is completed with the same
 * status and its socket is skipped by the event broadcast (it gets the
 * command response instead).
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9996
/* Called when the HCI authentication-enable change completes: sync the
 * HCI_LINK_SECURITY device flag with the HCI_AUTH state and respond to
 * pending Set Link Security commands. On failure, the pending commands
 * are failed with the translated status instead.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's auth state into the mgmt setting flag;
	 * 'changed' tracks whether the flag actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	/* Only broadcast new settings if the flag actually changed */
	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
10023
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)10024 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10025 {
10026 struct cmd_lookup *match = data;
10027
10028 if (match->sk == NULL) {
10029 match->sk = cmd->sk;
10030 sock_hold(match->sk);
10031 }
10032 }
10033
/* Called when a class-of-device update completes. On success the new
 * class is broadcast (skipping none); the socket of the first pending
 * Set Dev Class / Add UUID / Remove UUID command is only looked up so
 * its reference can be dropped.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		/* Class of device is always 3 bytes on the wire */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
10052
/* Called when a local-name update completes successfully. Emits
 * MGMT_EV_LOCAL_NAME_CHANGED (skipping the requester's socket when the
 * change came from a pending Set Local Name command).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command pending, so the change came from HCI
		 * directly; keep the stored name in sync.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10080
has_uuid(u8 * uuid,u16 uuid_count,u8 (* uuids)[16])10081 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10082 {
10083 int i;
10084
10085 for (i = 0; i < uuid_count; i++) {
10086 if (!memcmp(uuid, uuids[i], 16))
10087 return true;
10088 }
10089
10090 return false;
10091 }
10092
/* Walk the EIR/AD structures in @eir and return true if any advertised
 * 16-, 32- or 128-bit service UUID — expanded to 128-bit form using the
 * Bluetooth base UUID — matches an entry in @uuids.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* covers the type octet + data */
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop on a field that would run past the buffer */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUIDs are little-endian; expand each
			 * one into bytes 12-13 of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs expand into bytes 12-15 */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared verbatim */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length octet + contents) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10147
/* Queue a delayed LE scan restart. Used together with the strict
 * duplicate filter quirk so that updated advertising reports are not
 * suppressed by the controller. Skipped when scanning has already
 * stopped or when the current scan window would end before the restart
 * could fire.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart if it would land after the scheduled end of
	 * the current scan (scan_start + scan_duration).
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
10162
/* Apply the service-discovery filters (RSSI threshold and UUID list)
 * to a found-device report; returns false if the result should be
 * dropped. May trigger an LE scan restart on controllers with the
 * strict duplicate filter quirk.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the EIR/advertising
		 * data and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10207
/* Emit MGMT_EV_ADV_MONITOR_DEVICE_LOST for @handle when a previously
 * matched, monitored device is no longer seen.
 */
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}
10220
/* Build and send an ADV_MONITOR_DEVICE_FOUND event from an existing
 * DEVICE_FOUND skb by prepending the matched monitor @handle. The
 * input @skb is only read, never consumed.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* Same payload as DEVICE_FOUND plus the leading monitor handle */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10250
/* Route a prepared DEVICE_FOUND skb to normal discovery listeners
 * and/or advertisement-monitor listeners. The skb is consumed on every
 * path (sent via mgmt_event_skb() or freed).
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		/* Fast path: no monitor notifications pending */
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below: set again if any device still awaits its
	 * one-time monitor notification.
	 */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* Notify each monitored device only once */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Consume the skb: either forward it or drop it */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10314
/* Forward an LE advertising report as MGMT_EV_MESH_DEVICE_FOUND, but
 * only if it carries at least one of the AD types configured in
 * hdev->mesh_ad_types (an empty list accepts every report).
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* An empty filter list (first slot unset) accepts everything */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* Zero entry terminates the filter list */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	/* Repeat the same AD type scan over the scan response data */
	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: drop the report silently */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10380
/* Central entry point for discovered-device reports (inquiry results
 * and LE advertising). Applies the active discovery filters and then
 * forwards the result as MGMT_EV_DEVICE_FOUND and/or advertisement
 * monitor events; mesh listeners get their own copy first.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Mesh gets its own independent copy of every LE report */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE: check the Flags AD field instead */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a Class of Device EIR field if the report has a CoD
	 * but the EIR data does not already carry one.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Hands off the skb; it is consumed on every path */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10472
/* Emit an MGMT Device Found event carrying the outcome of a remote name
 * request.
 *
 * @name/@name_len: resolved friendly name, or NULL when resolution
 * failed, in which case MGMT_DEV_FOUND_NAME_REQUEST_FAILED is reported
 * instead of an EIR name element.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
	/* mgmt_alloc_skb() can fail; bail out instead of dereferencing a
	 * NULL skb in skb_put() below (mgmt_device_found() already does
	 * the same check).
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}
10499
/* Notify management sockets that discovery has started or stopped. */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev = {
		.type = hdev->discovery.type,
		.discovering = discovering,
	};

	bt_dev_dbg(hdev, "discovering %u", discovering);

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
10512
/* Notify management sockets that the controller is suspending. */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev = {
		.suspend_state = state,
	};

	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
10520
/* Notify management sockets that the controller resumed.
 *
 * @bdaddr/@addr_type: remote device that woke the controller, or NULL
 * when no device was involved (address reported as all zeroes).
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	memset(&ev.addr, 0, sizeof(ev.addr));
	ev.wake_reason = reason;

	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
10536
/* Control channel (HCI_CHANNEL_CONTROL) registration: the dispatch table
 * for all MGMT commands plus the per-hdev initialization hook.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10543
mgmt_init(void)10544 int mgmt_init(void)
10545 {
10546 return hci_mgmt_chan_register(&chan);
10547 }
10548
mgmt_exit(void)10549 void mgmt_exit(void)
10550 {
10551 hci_mgmt_chan_unregister(&chan);
10552 }
10553
mgmt_cleanup(struct sock * sk)10554 void mgmt_cleanup(struct sock *sk)
10555 {
10556 struct mgmt_mesh_tx *mesh_tx;
10557 struct hci_dev *hdev;
10558
10559 read_lock(&hci_dev_list_lock);
10560
10561 list_for_each_entry(hdev, &hci_dev_list, list) {
10562 do {
10563 mesh_tx = mgmt_mesh_next(hdev, sk);
10564
10565 if (mesh_tx)
10566 mesh_send_complete(hdev, mesh_tx, true);
10567 } while (mesh_tx);
10568 }
10569
10570 read_unlock(&hci_dev_list_lock);
10571 }
10572