1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* Opcodes accepted on the management control channel from trusted
 * (privileged) sockets. Reported verbatim by MGMT_OP_READ_COMMANDS.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};
133 
/* Events that may be delivered to trusted sockets; reported verbatim by
 * MGMT_OP_READ_COMMANDS alongside mgmt_commands[].
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
180 
/* Read-only subset of opcodes permitted for untrusted (non-privileged)
 * sockets; see read_commands().
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
193 
/* Subset of events delivered to untrusted sockets; no key material or
 * per-connection data appears in this list.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
208 
209 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
210 
211 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
212 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
213 
214 /* HCI to MGMT error code conversion table */
/* Indexed directly by HCI status code (0x00..0x3F range covered here);
 * mgmt_status() falls back to MGMT_STATUS_FAILED for codes past the end.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
281 
mgmt_errno_status(int err)282 static u8 mgmt_errno_status(int err)
283 {
284 	switch (err) {
285 	case 0:
286 		return MGMT_STATUS_SUCCESS;
287 	case -EPERM:
288 		return MGMT_STATUS_REJECTED;
289 	case -EINVAL:
290 		return MGMT_STATUS_INVALID_PARAMS;
291 	case -EOPNOTSUPP:
292 		return MGMT_STATUS_NOT_SUPPORTED;
293 	case -EBUSY:
294 		return MGMT_STATUS_BUSY;
295 	case -ETIMEDOUT:
296 		return MGMT_STATUS_AUTH_FAILED;
297 	case -ENOMEM:
298 		return MGMT_STATUS_NO_RESOURCES;
299 	case -EISCONN:
300 		return MGMT_STATUS_ALREADY_CONNECTED;
301 	case -ENOTCONN:
302 		return MGMT_STATUS_DISCONNECTED;
303 	}
304 
305 	return MGMT_STATUS_FAILED;
306 }
307 
mgmt_status(int err)308 static u8 mgmt_status(int err)
309 {
310 	if (err < 0)
311 		return mgmt_errno_status(err);
312 
313 	if (err < ARRAY_SIZE(mgmt_status_table))
314 		return mgmt_status_table[err];
315 
316 	return MGMT_STATUS_FAILED;
317 }
318 
/* Broadcast an index-related event on the control channel to every
 * socket matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
325 
/* Broadcast an event to sockets matching @flag, excluding @skip_sk
 * (typically the socket that triggered the change).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
332 
/* Broadcast an event to trusted sockets only, excluding @skip_sk. */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
339 
/* Broadcast a pre-built event skb to trusted sockets, excluding
 * @skip_sk. Ownership of @skb passes to mgmt_send_event_skb().
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
345 
/* Translate a MGMT address type into the corresponding LE device
 * address type; anything that is not public is treated as random.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return mgmt_addr_type == BDADDR_LE_PUBLIC ? ADDR_LE_DEV_PUBLIC :
						    ADDR_LE_DEV_RANDOM;
}
353 
/* Fill a mgmt_rp_read_version reply with the compiled-in interface
 * version/revision. @ver must point to a buffer of at least
 * sizeof(struct mgmt_rp_read_version) bytes.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
361 
/* MGMT_OP_READ_VERSION handler: reply with the management interface
 * version and revision. Controller-independent (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
374 
/* MGMT_OP_READ_COMMANDS handler: reply with the list of supported
 * command opcodes followed by the list of event opcodes. Trusted
 * sockets see the full tables, untrusted sockets the read-only subset.
 * Returns 0 on success or -ENOMEM if the reply cannot be allocated.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *cmds, *evts;
	u16 num_commands, num_events;
	size_t rp_size;
	__le16 *opcode;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pick the opcode tables once instead of branching twice. */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		cmds = mgmt_commands;
		evts = mgmt_events;
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		cmds = mgmt_untrusted_commands;
		evts = mgmt_untrusted_events;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* The opcodes[] area may be unaligned; write little-endian
	 * values byte-wise. Commands first, then events.
	 */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(cmds[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(evts[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
426 
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all configured
 * primary controllers. Two-pass pattern: count under the read lock to
 * size the allocation, then fill; the second pass applies additional
 * filters, so the final count may be smaller than the allocation.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocation happens while holding the read lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill, skipping devices still in setup/config or
	 * bound to a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the actual entry count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
486 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same shape as
 * read_index_list(), but reports only controllers still flagged
 * HCI_UNCONFIGURED (i.e. needing external config or a public address).
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocation happens while holding the read lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill, with extra filters applied. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the actual entry count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: report all controllers (primary
 * and AMP) with a per-entry type (0x00 configured primary, 0x01
 * unconfigured primary, 0x02 AMP) and bus. Calling this switches the
 * socket over to extended index events permanently.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: allocation happens while holding the read lock. */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill, with extra filters applied. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
620 
is_configured(struct hci_dev * hdev)621 static bool is_configured(struct hci_dev *hdev)
622 {
623 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
624 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
625 		return false;
626 
627 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
628 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
629 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
630 		return false;
631 
632 	return true;
633 }
634 
/* Compute which configuration steps are still outstanding for @hdev,
 * as a little-endian MGMT_OPTION_* bitmask. Same conditions as
 * is_configured(): a zero result means fully configured.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
650 
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-options
 * mask to sockets subscribed to option events, excluding @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
658 
/* Complete @opcode with the current missing-options mask as payload. */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
666 
/* MGMT_OP_READ_CONFIG_INFO handler: report the manufacturer id plus the
 * supported and still-missing configuration options for @hdev.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* memset (not designated init) so struct padding is zeroed too,
	 * since rp is copied out to userspace.
	 */
	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Public-address configuration is only possible when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
694 
/* Build the MGMT_PHY_* bitmask of PHYs this controller supports, derived
 * from the LMP feature bits (BR/EDR) and LE feature bits. The nesting
 * mirrors the dependency chain: EDR slot variants only exist when the
 * corresponding data-rate capability is present.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* 1-slot basic rate is mandatory for any BR/EDR radio. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE radio. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
746 
/* Build the MGMT_PHY_* bitmask of PHYs currently selected. For BR/EDR,
 * basic-rate packet types are opt-in bits in hdev->pkt_type while EDR
 * packet types are opt-out bits (HCI_2DHx/HCI_3DHx set means the type
 * is disabled) — hence the inverted tests for the EDR variants. LE
 * selection comes from the default TX/RX PHY preference masks.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits are "do not use" flags: clear = selected */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
809 
get_configurable_phys(struct hci_dev * hdev)810 static u32 get_configurable_phys(struct hci_dev *hdev)
811 {
812 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
813 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
814 }
815 
/* Build the MGMT_SETTING_* bitmask of settings this controller could
 * support, based on its LMP/LE capabilities and driver hooks — not on
 * what is currently enabled (see get_current_settings() for that).
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of radio capabilities. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable (interlaced page scan) needs 1.2+. */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration is possible via external config quirk or a
	 * driver-provided set_bdaddr callback.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
862 
/* Build the MGMT_SETTING_* bitmask of settings currently active on
 * @hdev, derived from its runtime flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
933 
/* Look up a pending control-channel command by opcode for @hdev;
 * returns NULL when none is outstanding.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
938 
mgmt_get_adv_discov_flags(struct hci_dev * hdev)939 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
940 {
941 	struct mgmt_pending_cmd *cmd;
942 
943 	/* If there's a pending mgmt command the flags will not yet have
944 	 * their final values, so check for this first.
945 	 */
946 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
947 	if (cmd) {
948 		struct mgmt_mode *cp = cmd->param;
949 		if (cp->val == 0x01)
950 			return LE_AD_GENERAL;
951 		else if (cp->val == 0x02)
952 			return LE_AD_LIMITED;
953 	} else {
954 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
955 			return LE_AD_LIMITED;
956 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
957 			return LE_AD_GENERAL;
958 	}
959 
960 	return 0;
961 }
962 
mgmt_get_connectable(struct hci_dev * hdev)963 bool mgmt_get_connectable(struct hci_dev *hdev)
964 {
965 	struct mgmt_pending_cmd *cmd;
966 
967 	/* If there's a pending mgmt command the flag will not yet have
968 	 * it's final value, so check for this first.
969 	 */
970 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
971 	if (cmd) {
972 		struct mgmt_mode *cp = cmd->param;
973 
974 		return cp->val;
975 	}
976 
977 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
978 }
979 
/* hci_cmd_sync callback: push the (possibly stale) EIR data and class
 * of device to the controller after the service cache expires.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
987 
/* Delayed work handler for hdev->service_cache: when the cache window
 * closes, queue a sync update of EIR and device class — but only if
 * the HCI_SERVICE_CACHE flag was still set (test-and-clear avoids
 * racing a concurrent clear).
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
998 
rpa_expired_sync(struct hci_dev * hdev,void * data)999 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1000 {
1001 	/* The generation of a new RPA and programming it into the
1002 	 * controller happens in the hci_req_enable_advertising()
1003 	 * function.
1004 	 */
1005 	if (ext_adv_capable(hdev))
1006 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1007 	else
1008 		return hci_enable_advertising_sync(hdev);
1009 }
1010 
/* Delayed work handler for hdev->rpa_expired: mark the RPA as stale
 * and, if advertising is currently active, queue a sync re-enable of
 * advertising so a fresh RPA gets generated.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to refresh if we are not advertising */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1025 
/* One-time per-controller mgmt initialization, performed the first
 * time a mgmt command touches the device. The HCI_MGMT
 * test-and-set flag makes this idempotent.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1041 
/* Handler for MGMT_OP_READ_INFO: report the controller's address,
 * HCI version, manufacturer, class of device, names, and the
 * supported/current settings bitmasks.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Snapshot all fields under the dev lock for consistency */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1071 
/* Build the EIR blob used by the extended controller info reply/event:
 * class of device (BR/EDR only), appearance (LE only), and the
 * complete and short local names. Returns the number of bytes written.
 *
 * NOTE(review): no length is passed in — callers provide a buffer they
 * size themselves (currently 512-byte stack buffers); confirm any new
 * caller leaves enough room for dev_name/short_name plus headers.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1095 
/* Handler for MGMT_OP_READ_EXT_INFO: like READ_INFO but packs the
 * variable-length data (class, appearance, names) as EIR structures
 * after the fixed reply header.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	/* Variable part: EIR-encoded class/appearance/names */
	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1135 
/* Broadcast MGMT_EV_EXT_INFO_CHANGED (with the freshly built EIR data)
 * to every socket that opted in via READ_EXT_INFO, skipping @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1151 
/* Send a successful command-complete reply whose payload is the
 * current settings bitmask — the standard response for all the
 * Set-* mode commands.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1159 
/* Emit MGMT_EV_ADVERTISING_ADDED for @instance to all mgmt sockets,
 * skipping the originating socket @sk.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1168 
/* Emit MGMT_EV_ADVERTISING_REMOVED for @instance to all mgmt sockets,
 * skipping the originating socket @sk.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1178 
cancel_adv_timeout(struct hci_dev * hdev)1179 static void cancel_adv_timeout(struct hci_dev *hdev)
1180 {
1181 	if (hdev->adv_instance_timeout) {
1182 		hdev->adv_instance_timeout = 0;
1183 		cancel_delayed_work(&hdev->adv_instance_expire);
1184 	}
1185 }
1186 
/* This function requires the caller holds hdev->lock */
/* Re-sort every stored LE connection parameter entry back onto the
 * pending-connection or pending-report action list according to its
 * auto_connect policy. Used when powering (back) on.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1211 
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all sockets subscribed to setting events, skipping @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1219 
/* Completion callback for the Set Powered sync operation: replies to
 * the originating socket and, on power-on, restores LE auto-connect
 * actions and broadcasts the new settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* Power-on: requeue stored LE actions and kick
			 * passive scanning under the dev lock.
			 */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1255 
/* hci_cmd_sync callback for Set Powered: apply the requested power
 * state from the pending command's parameters.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1265 
/* Handler for MGMT_OP_SET_POWERED: validate, reject duplicates and
 * no-ops, then queue the actual power change as a sync operation.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x00 (off) and 0x01 (on) are valid values */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: reply immediately */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
				 mgmt_set_powered_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1308 
/* Public helper: broadcast the current settings to every subscribed
 * mgmt socket (no socket is skipped).
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1313 
/* Context passed through mgmt_pending_foreach() callbacks:
 * sk          - first responder's socket, held so follow-up events can
 *               skip it (caller must sock_put() when non-NULL)
 * hdev        - controller the commands belong to
 * mgmt_status - status code to propagate, where applicable
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1319 
/* mgmt_pending_foreach() callback: answer each pending command with
 * the current settings, remember (and hold) the first socket so the
 * caller can skip it when broadcasting, and free the command.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1335 
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status pointed to by @data and remove it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1343 
cmd_complete_rsp(struct mgmt_pending_cmd * cmd,void * data)1344 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1345 {
1346 	if (cmd->cmd_complete) {
1347 		u8 *status = data;
1348 
1349 		cmd->cmd_complete(cmd, *status);
1350 		mgmt_pending_remove(cmd);
1351 
1352 		return;
1353 	}
1354 
1355 	cmd_status_rsp(cmd, data);
1356 }
1357 
/* Default cmd_complete handler: echo the command's own parameters back
 * as the reply payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1363 
/* cmd_complete handler for commands whose parameters start with a
 * struct mgmt_addr_info: reply with just that address portion.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1369 
mgmt_bredr_support(struct hci_dev * hdev)1370 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1371 {
1372 	if (!lmp_bredr_capable(hdev))
1373 		return MGMT_STATUS_NOT_SUPPORTED;
1374 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1375 		return MGMT_STATUS_REJECTED;
1376 	else
1377 		return MGMT_STATUS_SUCCESS;
1378 }
1379 
mgmt_le_support(struct hci_dev * hdev)1380 static u8 mgmt_le_support(struct hci_dev *hdev)
1381 {
1382 	if (!lmp_le_capable(hdev))
1383 		return MGMT_STATUS_NOT_SUPPORTED;
1384 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1385 		return MGMT_STATUS_REJECTED;
1386 	else
1387 		return MGMT_STATUS_SUCCESS;
1388 }
1389 
/* Completion callback for Set Discoverable: on success arm the
 * discoverable timeout (if any) and broadcast new settings; on failure
 * report the error and drop the limited-discoverable flag.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arming of the timeout was deferred to this handler (see
	 * set_discoverable()).
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1423 
/* hci_cmd_sync callback for Set Discoverable: push the already-updated
 * discoverable flags to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1430 
/* Handler for MGMT_OP_SET_DISCOVERABLE: validate the mode/timeout
 * combination, handle the powered-off and no-op cases directly, and
 * otherwise update the flags and queue the HCI update as a sync op.
 * val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Discoverable requires at least one transport to be enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes must not race each other */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1563 
mgmt_set_connectable_complete(struct hci_dev * hdev,void * data,int err)1564 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1565 					  int err)
1566 {
1567 	struct mgmt_pending_cmd *cmd = data;
1568 
1569 	bt_dev_dbg(hdev, "err %d", err);
1570 
1571 	/* Make sure cmd still outstanding. */
1572 	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1573 		return;
1574 
1575 	hci_dev_lock(hdev);
1576 
1577 	if (err) {
1578 		u8 mgmt_err = mgmt_status(err);
1579 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1580 		goto done;
1581 	}
1582 
1583 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1584 	new_settings(hdev, cmd->sk);
1585 
1586 done:
1587 	if (cmd)
1588 		mgmt_pending_remove(cmd);
1589 
1590 	hci_dev_unlock(hdev);
1591 }
1592 
/* Apply a Set Connectable request purely at the flag level (used when
 * the controller is powered off, so no HCI traffic is needed), reply
 * to the requester, and broadcast new settings if anything changed.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also implies not discoverable */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1621 
/* hci_cmd_sync callback for Set Connectable: push the already-updated
 * connectable flags to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1628 
/* Handler for MGMT_OP_SET_CONNECTABLE: validate, take the flag-only
 * path when powered off, otherwise update flags and queue the HCI
 * update as a sync operation.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires at least one enabled transport */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the flags need updating, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Discoverable and connectable changes must not race each other */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable also cancels discoverability
		 * and its timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1688 
/* Handler for MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag; no
 * HCI command is needed, but a change may require refreshing the
 * advertising address in limited privacy mode.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear report whether the flag actually changed */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1726 
/* Handler for MGMT_OP_SET_LINK_SECURITY: enable/disable BR/EDR link
 * level security (authentication). When powered, this issues
 * HCI_OP_WRITE_AUTH_ENABLE directly; the reply is then sent from the
 * corresponding HCI event handler.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only toggle the flag, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: just reply */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1795 
/* Completion callback for Set SSP: reconcile the HCI_SSP_ENABLED (and
 * dependent HCI_HS_ENABLED) flags with the outcome, answer every
 * pending SET_SSP command, and broadcast new settings if anything
 * changed. Also refreshes EIR data since SSP affects it.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back a failed enable and tell listeners */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		/* High Speed requires SSP, so clear it alongside */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1844 
/* hci_cmd_sync callback for Set SSP: write the requested SSP mode to
 * the controller.
 *
 * NOTE(review): when enabling, HCI_SSP_ENABLED is set before the write
 * and — on success — cleared again if this call was the one that set
 * it; presumably so set_ssp_complete() can re-detect the transition
 * via its own test_and_set. Confirm against set_ssp_complete() before
 * changing this ordering.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1862 
/* Handler for MGMT_OP_SET_SSP: enable/disable Secure Simple Pairing.
 * When powered off only the flags are toggled; otherwise the change is
 * queued as a sync operation completed by set_ssp_complete().
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: toggle flags only, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* High Speed depends on SSP, clear it too */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just reply */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1942 
/* Handler for MGMT_OP_SET_HS: enable/disable High Speed (AMP) support.
 * This is a flag-only change — HS requires CONFIG_BT_HS, BR/EDR, SSP
 * capability and SSP enabled; disabling is rejected while powered.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS is only valid on top of an SSP-enabled controller */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Avoid racing with an in-flight SSP change */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS while powered is not allowed */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2003 
set_le_complete(struct hci_dev * hdev,void * data,int err)2004 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2005 {
2006 	struct cmd_lookup match = { NULL, hdev };
2007 	u8 status = mgmt_status(err);
2008 
2009 	bt_dev_dbg(hdev, "err %d", err);
2010 
2011 	if (status) {
2012 		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2013 							&status);
2014 		return;
2015 	}
2016 
2017 	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2018 
2019 	new_settings(hdev, match.sk);
2020 
2021 	if (match.sk)
2022 		sock_put(match.sk);
2023 }
2024 
/* hci_cmd_sync work for MGMT_OP_SET_LE: program the LE Host Supported
 * setting into the controller and refresh advertising defaults.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Stop any form of advertising before turning LE off. */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			/* Only set scan response data if the instance
			 * was set up successfully.
			 */
			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2066 
/* MGMT_OP_SET_LE handler: enable or disable Low Energy support.
 *
 * Returns a negative errno or the result of sending the mgmt reply.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* The mode parameter is strictly boolean. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Turning LE off removes all advertising instances. */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* If the controller is not powered, or the requested state already
	 * matches the host LE setting, only the host flags need updating
	 * and no HCI command needs to be queued.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Advertising cannot remain enabled without LE. */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one LE-state-affecting command may be in flight at a time. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	/* Allocation or queueing failure: reply now and discard the
	 * pending command, if one was created.
	 */
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2158 
2159 /* This is a helper function to test for pending mgmt commands that can
2160  * cause CoD or EIR HCI commands. We can only allow one such pending
2161  * mgmt command at a time since otherwise we cannot easily track what
2162  * the current values are, will be, and based on that calculate if a new
2163  * HCI command needs to be sent and if yes with what value.
2164  */
pending_eir_or_class(struct hci_dev * hdev)2165 static bool pending_eir_or_class(struct hci_dev *hdev)
2166 {
2167 	struct mgmt_pending_cmd *cmd;
2168 
2169 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2170 		switch (cmd->opcode) {
2171 		case MGMT_OP_ADD_UUID:
2172 		case MGMT_OP_REMOVE_UUID:
2173 		case MGMT_OP_SET_DEV_CLASS:
2174 		case MGMT_OP_SET_POWERED:
2175 			return true;
2176 		}
2177 	}
2178 
2179 	return false;
2180 }
2181 
/* The Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored
 * least-significant byte first; bytes 12-15 carry the 16/32-bit short
 * form (see get_uuid_size() below).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2186 
get_uuid_size(const u8 * uuid)2187 static u8 get_uuid_size(const u8 *uuid)
2188 {
2189 	u32 val;
2190 
2191 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2192 		return 128;
2193 
2194 	val = get_unaligned_le32(&uuid[12]);
2195 	if (val > 0xffff)
2196 		return 32;
2197 
2198 	return 16;
2199 }
2200 
mgmt_class_complete(struct hci_dev * hdev,void * data,int err)2201 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2202 {
2203 	struct mgmt_pending_cmd *cmd = data;
2204 
2205 	bt_dev_dbg(hdev, "err %d", err);
2206 
2207 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2208 			  mgmt_status(err), hdev->dev_class, 3);
2209 
2210 	mgmt_pending_free(cmd);
2211 }
2212 
/* Sync work for MGMT_OP_ADD_UUID: refresh the Class of Device first and,
 * only if that succeeded, the EIR data.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2223 
/* MGMT_OP_ADD_UUID handler: register a service UUID and queue an update
 * of the Class of Device and EIR data. The mgmt reply is sent from
 * mgmt_class_complete() once the queued work finishes.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting operation may be pending. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	/* Cache the smallest encoding so EIR generation need not
	 * recompute it.
	 */
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	/* NOTE(review): if allocating/queueing the pending command fails
	 * below, the UUID stays on hdev->uuids even though the command
	 * itself fails - matches the behavior visible here.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2269 
enable_service_cache(struct hci_dev * hdev)2270 static bool enable_service_cache(struct hci_dev *hdev)
2271 {
2272 	if (!hdev_is_powered(hdev))
2273 		return false;
2274 
2275 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2276 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2277 				   CACHE_TIMEOUT);
2278 		return true;
2279 	}
2280 
2281 	return false;
2282 }
2283 
/* Sync work for MGMT_OP_REMOVE_UUID: refresh the Class of Device first
 * and, only if that succeeded, the EIR data.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2294 
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID (or all of them
 * when the all-zero UUID is given) and queue a class/EIR refresh.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID is the wildcard meaning "remove everything". */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting operation may be pending. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache was (re)armed, the delayed work
		 * will perform the controller update; reply immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every list entry matching the given UUID. */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	/* The mgmt reply is sent from mgmt_class_complete() once the
	 * queued work finishes.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2362 
set_class_sync(struct hci_dev * hdev,void * data)2363 static int set_class_sync(struct hci_dev *hdev, void *data)
2364 {
2365 	int err = 0;
2366 
2367 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2368 		cancel_delayed_work_sync(&hdev->service_cache);
2369 		err = hci_update_eir_sync(hdev);
2370 	}
2371 
2372 	if (err)
2373 		return err;
2374 
2375 	return hci_update_class_sync(hdev);
2376 }
2377 
/* MGMT_OP_SET_DEV_CLASS handler: set the major/minor device class and
 * queue the controller update when powered.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Class of Device only exists for BR/EDR controllers. */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting operation may be pending. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two bits of minor and high three bits of major are
	 * reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* While powered off only the stored values change; the controller
	 * is updated on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* The mgmt reply is sent from mgmt_class_complete() once the
	 * queued work finishes.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2429 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the whole set of stored BR/EDR
 * link keys with the list supplied by userspace.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps the expected_len computation below from
	 * exceeding the u16 range.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised key count exactly. */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before any state is modified, so a bad
	 * entry cannot leave the key store half-updated.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* This is a full replace: drop everything currently stored. */
	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Blocklisted keys are skipped rather than failing the
		 * whole load.
		 */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2518 
/* Emit a Device Unpaired event for the given address, skipping the
 * socket that triggered the unpair (it gets the command reply instead).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev = {
		.addr.type = addr_type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2530 
/* MGMT_OP_UNPAIR_DEVICE handler: remove stored pairing data for a device
 * and optionally terminate an existing connection to it.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the requested address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* The disconnect parameter is strictly boolean. */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: the parameters can be removed right away. */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Reply is deferred until the disconnect completes. */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2658 
/* MGMT_OP_DISCONNECT handler: terminate the connection to a given
 * BR/EDR or LE device. The reply is deferred until the disconnect
 * completes (via the pending command's cmd_complete).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the requested address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in flight at a time. */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections are not established links. */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2724 
/* Map an HCI link type plus LE address type to the mgmt BDADDR_* address
 * type used on the management interface.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	/* Anything that is not an LE link is reported as BR/EDR. */
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	/* For LE only the public type maps directly; every other address
	 * type falls back to random.
	 */
	return addr_type == ADDR_LE_DEV_PUBLIC ? BDADDR_LE_PUBLIC :
						 BDADDR_LE_RANDOM;
}
2743 
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible connections.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the reply buffer. */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses. SCO/eSCO links are skipped
	 * without incrementing i, so an entry written for them is either
	 * overwritten by the next connection or excluded by the final
	 * length below (the buffer was sized with them included, so the
	 * write itself stays in bounds).
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2797 
/* Queue an HCI PIN Code Negative Reply and register a pending mgmt
 * command whose reply is sent once the HCI command completes.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command only carries the remote address. */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2818 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN codes are only meaningful for an existing ACL link. */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; anything shorter is
	 * turned into a negative reply towards the controller and an
	 * INVALID_PARAMS status towards the caller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2880 
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * future pairing attempts. Purely a host-side setting; no HCI traffic.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SMP_IO_KEYBOARD_DISPLAY is the highest defined capability. */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
2903 
find_pairing(struct hci_conn * conn)2904 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2905 {
2906 	struct hci_dev *hdev = conn->hdev;
2907 	struct mgmt_pending_cmd *cmd;
2908 
2909 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2910 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2911 			continue;
2912 
2913 		if (cmd->user_data != conn)
2914 			continue;
2915 
2916 		return cmd;
2917 	}
2918 
2919 	return NULL;
2920 }
2921 
/* Finish a PAIR_DEVICE command: send the reply, detach the callbacks
 * and release the connection references held by the command.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drops the reference taken via hci_conn_get() when the pairing
	 * command was created (see pair_device).
	 */
	hci_conn_put(conn);

	return err;
}
2950 
/* Called by SMP when pairing finishes; completes the pending
 * PAIR_DEVICE command, if any, with success or failure.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
2962 
/* Connection callback for BR/EDR pairing: complete the pending
 * PAIR_DEVICE command with the (translated) HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	BT_DBG("status %u", status);

	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2978 
/* Connection callback for LE pairing: only failures complete the
 * pending command here (success is reported via mgmt_smp_complete()).
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2997 
pair_device(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)2998 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2999 		       u16 len)
3000 {
3001 	struct mgmt_cp_pair_device *cp = data;
3002 	struct mgmt_rp_pair_device rp;
3003 	struct mgmt_pending_cmd *cmd;
3004 	u8 sec_level, auth_type;
3005 	struct hci_conn *conn;
3006 	int err;
3007 
3008 	bt_dev_dbg(hdev, "sock %p", sk);
3009 
3010 	memset(&rp, 0, sizeof(rp));
3011 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3012 	rp.addr.type = cp->addr.type;
3013 
3014 	if (!bdaddr_type_is_valid(cp->addr.type))
3015 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3016 					 MGMT_STATUS_INVALID_PARAMS,
3017 					 &rp, sizeof(rp));
3018 
3019 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3020 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3021 					 MGMT_STATUS_INVALID_PARAMS,
3022 					 &rp, sizeof(rp));
3023 
3024 	hci_dev_lock(hdev);
3025 
3026 	if (!hdev_is_powered(hdev)) {
3027 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3028 					MGMT_STATUS_NOT_POWERED, &rp,
3029 					sizeof(rp));
3030 		goto unlock;
3031 	}
3032 
3033 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3034 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3035 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3036 					sizeof(rp));
3037 		goto unlock;
3038 	}
3039 
3040 	sec_level = BT_SECURITY_MEDIUM;
3041 	auth_type = HCI_AT_DEDICATED_BONDING;
3042 
3043 	if (cp->addr.type == BDADDR_BREDR) {
3044 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3045 				       auth_type, CONN_REASON_PAIR_DEVICE);
3046 	} else {
3047 		u8 addr_type = le_addr_type(cp->addr.type);
3048 		struct hci_conn_params *p;
3049 
3050 		/* When pairing a new device, it is expected to remember
3051 		 * this device for future connections. Adding the connection
3052 		 * parameter information ahead of time allows tracking
3053 		 * of the peripheral preferred values and will speed up any
3054 		 * further connection establishment.
3055 		 *
3056 		 * If connection parameters already exist, then they
3057 		 * will be kept and this function does nothing.
3058 		 */
3059 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3060 
3061 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3062 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3063 
3064 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3065 					   sec_level, HCI_LE_CONN_TIMEOUT,
3066 					   CONN_REASON_PAIR_DEVICE);
3067 	}
3068 
3069 	if (IS_ERR(conn)) {
3070 		int status;
3071 
3072 		if (PTR_ERR(conn) == -EBUSY)
3073 			status = MGMT_STATUS_BUSY;
3074 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3075 			status = MGMT_STATUS_NOT_SUPPORTED;
3076 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3077 			status = MGMT_STATUS_REJECTED;
3078 		else
3079 			status = MGMT_STATUS_CONNECT_FAILED;
3080 
3081 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3082 					status, &rp, sizeof(rp));
3083 		goto unlock;
3084 	}
3085 
3086 	if (conn->connect_cfm_cb) {
3087 		hci_conn_drop(conn);
3088 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3089 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3090 		goto unlock;
3091 	}
3092 
3093 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3094 	if (!cmd) {
3095 		err = -ENOMEM;
3096 		hci_conn_drop(conn);
3097 		goto unlock;
3098 	}
3099 
3100 	cmd->cmd_complete = pairing_complete;
3101 
3102 	/* For LE, just connecting isn't a proof that the pairing finished */
3103 	if (cp->addr.type == BDADDR_BREDR) {
3104 		conn->connect_cfm_cb = pairing_complete_cb;
3105 		conn->security_cfm_cb = pairing_complete_cb;
3106 		conn->disconn_cfm_cb = pairing_complete_cb;
3107 	} else {
3108 		conn->connect_cfm_cb = le_pairing_complete_cb;
3109 		conn->security_cfm_cb = le_pairing_complete_cb;
3110 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3111 	}
3112 
3113 	conn->io_capability = cp->io_cap;
3114 	cmd->user_data = hci_conn_get(conn);
3115 
3116 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3117 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3118 		cmd->cmd_complete(cmd, 0);
3119 		mgmt_pending_remove(cmd);
3120 	}
3121 
3122 	err = 0;
3123 
3124 unlock:
3125 	hci_dev_unlock(hdev);
3126 	return err;
3127 }
3128 
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* Cancelling only makes sense while a Pair Device command is
	 * still pending on this controller.
	 */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* user_data holds the connection reference taken by pair_device() */
	conn = cmd->user_data;

	/* The given address must match the device that is being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Complete the pending Pair Device command as cancelled before
	 * answering the cancel request itself.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3185 
/* Common handler for all user pairing responses (PIN code, user
 * confirmation and passkey replies plus their negative variants).
 * For LE addresses the response is handed to SMP; for BR/EDR it is
 * forwarded to the controller as the HCI command @hci_op.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* Look up the connection this response refers to */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE responses go through SMP and are completed right away */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	/* BR/EDR: keep the mgmt command pending until the HCI command
	 * settles; addr_cmd_complete() sends the final response.
	 */
	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3256 
/* Reject a PIN code request by relaying the negative reply through the
 * common user-pairing response path.
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *reply = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &reply->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3268 
/* Accept a user confirmation request via the common user-pairing
 * response path.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *reply = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The command carries no variable-length fields, so anything but
	 * the exact structure size is malformed.
	 */
	if (len != sizeof(*reply))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &reply->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3284 
/* Reject a user confirmation request via the common user-pairing
 * response path.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *reply = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &reply->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3296 
/* Deliver the user-entered passkey via the common user-pairing
 * response path.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *reply = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &reply->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, reply->passkey);
}
3308 
/* Reject a passkey request via the common user-pairing response path. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *reply = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &reply->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3320 
/* Expire the current advertising instance if it uses any of @flags,
 * then move on to the next instance (if there is one).
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv, *next;

	/* Nothing to do without a current advertising instance */
	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv)
		return 0;

	/* Leave the instance alone unless it carries one of @flags */
	if (!(adv->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	next = hci_get_next_instance(hdev, adv->instance);
	if (next)
		hci_schedule_adv_instance_sync(hdev, next->instance, true);

	return 0;
}
3343 
/* cmd_sync callback: expire any advertising instance carrying the
 * local name after it has changed.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3348 
/* Completion handler for the queued Set Local Name work: sends the
 * final mgmt response and, on success, expires name-carrying adv
 * instances.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if this command is no longer the pending one (it may
	 * have been completed or removed elsewhere in the meantime).
	 */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* While LE advertising is active, the name change is
		 * propagated by expiring the current instance from the
		 * cmd_sync context.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3373 
/* cmd_sync work: push the new local name to the controller. */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	/* BR/EDR: write the name and refresh the EIR data embedding it */
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3389 
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name can be stored right away; dev_name is only
	 * committed once the controller update has been queued (or
	 * immediately while powered off, below).
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: no HCI traffic is needed, so store the name and
	 * notify the other mgmt sockets directly.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	/* Otherwise queue the update; set_name_complete() sends the final
	 * response once set_name_sync() has run.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	/* Note: "failed" is the common unlock/exit path, reached on
	 * success as well.
	 */
failed:
	hci_dev_unlock(hdev);
	return err;
}
3452 
/* cmd_sync callback: expire any advertising instance carrying the
 * appearance value after it has changed.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3457 
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only supported on LE capable controllers */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		/* While advertising, expire any instance that carries the
		 * appearance (done from the cmd_sync context).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3492 
/* Report the supported, currently selected and configurable PHYs. */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3513 
/* Broadcast the PHY Configuration Changed event with the currently
 * selected PHYs to all mgmt sockets except @skip.
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3525 
/* Completion handler for the queued Set PHY Configuration work. */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Ignore stale completions for a command that is no longer
	 * pending.
	 */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* When the request itself succeeded, derive the status from the
	 * command complete skb stored by set_default_phy_sync(); the skb
	 * may be missing, an ERR_PTR, or carry the HCI status in its
	 * first byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Notify other mgmt sockets about the new selection */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3562 
set_default_phy_sync(struct hci_dev * hdev,void * data)3563 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3564 {
3565 	struct mgmt_pending_cmd *cmd = data;
3566 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3567 	struct hci_cp_le_set_default_phy cp_phy;
3568 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3569 
3570 	memset(&cp_phy, 0, sizeof(cp_phy));
3571 
3572 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3573 		cp_phy.all_phys |= 0x01;
3574 
3575 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3576 		cp_phy.all_phys |= 0x02;
3577 
3578 	if (selected_phys & MGMT_PHY_LE_1M_TX)
3579 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3580 
3581 	if (selected_phys & MGMT_PHY_LE_2M_TX)
3582 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3583 
3584 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
3585 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3586 
3587 	if (selected_phys & MGMT_PHY_LE_1M_RX)
3588 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3589 
3590 	if (selected_phys & MGMT_PHY_LE_2M_RX)
3591 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3592 
3593 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
3594 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3595 
3596 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3597 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
3598 
3599 	return 0;
3600 }
3601 
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any selection outside the supported set */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* PHYs that cannot be configured must always remain selected */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY selections onto the ACL packet type mask.
	 * Note the inverted handling for the EDR bits relative to the
	 * basic-rate ones: a set HCI_2DHx/HCI_3DHx bit appears to mean
	 * "shall not use" that packet type.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part of the selection is unchanged, only the BR/EDR
	 * packet types were affected and no HCI work needs to be queued;
	 * respond right away.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	/* Queue the LE default PHY update; set_default_phy_complete()
	 * sends the final response.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3730 
/* Replace the list of blocked (revoked) keys with the one supplied by
 * userspace. The reply status is MGMT_STATUS_SUCCESS, or
 * MGMT_STATUS_NO_RESOURCES if an allocation failed part-way (keys
 * added up to that point remain on the list).
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command must be exactly header plus key_count entries */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	/* Iterate using the CPU-endian key_count: keys->key_count is a
	 * raw __le16 and comparing against it directly would read the
	 * wrong count on big-endian hosts.
	 */
	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
3779 
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only available when the driver declares the quirk */
	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean on/off is valid */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* While powered, only re-confirming the current value is allowed;
	 * an actual change is rejected.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Track whether the flag actually flipped so New Settings is
	 * only emitted on a real change.
	 */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3828 
/* Build the Read Controller Capabilities reply as a sequence of
 * EIR-style TLVs appended to the fixed reply header.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* buf must fit the reply header plus every TLV appended below;
	 * keep the size in sync when adding new capabilities.
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3895 
/* Experimental feature UUIDs. The byte arrays store the UUID from the
 * comment in reversed (little-endian) byte order.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3927 
/* List the experimental features available on @hdev (or globally when
 * called without a controller index, i.e. hdev == NULL) together with
 * their current flags.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[102];   /* Enough space for 5 features: 2 + 20 * 5 */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature is only exposed on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	/* BIT(0) in flags means the feature is currently enabled */
	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* For LL privacy, BIT(1) is always reported in addition to the
	 * enabled bit.
	 */
	if (hdev && ll_privacy_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
4006 
/* Notify about a change of the LL privacy experimental feature and
 * update the default connection flags accordingly.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	/* BIT(1) is always reported for this feature (cf. the flags used
	 * in read_exp_features_info()).
	 */
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	/* TODO(review): does this need to be atomic with respect to other
	 * conn_flags users?
	 */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);

}
4027 
/* Send the Experimental Feature Changed event for @uuid to all sockets
 * that enabled exp-feature events, except @skip.
 */
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
4041 
/* Initializer for an experimental-features table entry, pairing a
 * feature UUID with its set handler.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4047 
/* The zero key uuid is special. Multiple exp features are set through it:
 * it disables the debug feature (non-controller index) and LL privacy
 * (per controller, only while powered off).
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	/* The reply always carries the zero UUID with no flags set */
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* Disable LL privacy; only permitted while powered off */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4084 
4085 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Enable/disable the BT debug experimental feature (debug_uuid).
 * Only valid on the non-controller index, with a single boolean octet
 * as parameter.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* changed is true iff the requested value differs from the
	 * current debug state.
	 */
	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	/* Subscribe the caller to future exp-feature change events */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other subscribers only after responding to the caller */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
4130 #endif
4131 
/* Enable/disable the LL privacy (RPA resolution) experimental feature.
 * Requires a valid controller index and a powered-down controller;
 * the parameter is a single boolean octet.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		/* Clearing HCI_ADVERTISING while enabling - presumably the
		 * advertising setting conflicts with LL privacy; confirm.
		 */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	/* Subscribe the caller to future exp-feature change events */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4196 
/* Enable/disable the quality report experimental feature. Prefers the
 * driver hook (hdev->set_quality_report) and falls back to the AOSP
 * vendor command. The toggle is serialized against other HCI requests
 * with hci_req_sync_lock().
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	/* Neither a driver hook nor AOSP support: feature unavailable */
	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only record the new state once the controller accepted it */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4270 
/* Enable/disable the offload codecs experimental feature. The feature
 * is only available when the driver provides a get_data_path_id hook.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Offloading needs the driver's data path id callback */
	if (!hdev->get_data_path_id)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);

	enable = !!cp->param[0];
	changed = (enable !=
		   hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (changed) {
		if (enable)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    enable, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, enable, sk);

	return err;
}
4328 
/* Enable/disable use of simultaneous LE roles (central + peripheral).
 * Only supported when the controller's LE supported states allow it
 * (hci_dev_le_state_simultaneous()).
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4386 
/* Dispatch table for MGMT_OP_SET_EXP_FEATURE: maps each 128-bit feature
 * UUID to its handler. Terminated by a NULL-uuid sentinel entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4404 
/* MGMT_OP_SET_EXP_FEATURE entry point: dispatch to the handler whose
 * UUID matches the request, or fail with NOT_SUPPORTED.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	const struct mgmt_exp_feature *feature;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (feature = exp_features; feature->uuid; feature++) {
		if (!memcmp(cp->uuid, feature->uuid, 16))
			return feature->set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4422 
/* Return the device-flags mask actually supported for @params. */
static u32 get_params_flags(struct hci_dev *hdev,
			    struct hci_conn_params *params)
{
	u32 flags = hdev->conn_flags;

	/* Devices using RPAs (i.e. devices we hold an IRK for) can only
	 * be programmed into the accept list when LL Privacy has been
	 * enabled; without it they cannot support
	 * HCI_CONN_FLAG_REMOTE_WAKEUP, so mask that flag out.
	 */
	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;

	return flags;
}
4438 
/* MGMT_OP_GET_DEVICE_FLAGS: report supported and current device flags.
 * BR/EDR devices are looked up in the accept list, LE devices in the
 * stored connection parameters. Unknown devices get
 * MGMT_STATUS_INVALID_PARAMS with a zeroed response.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		/* LE devices may support a narrower mask than the
		 * controller-wide one (see get_params_flags()).
		 */
		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
4490 
/* Emit MGMT_EV_DEVICE_FLAGS_CHANGED for @bdaddr to all sockets except
 * the originating socket @sk.
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed event;

	bacpy(&event.addr.bdaddr, bdaddr);
	event.addr.type = bdaddr_type;
	event.supported_flags = cpu_to_le32(supported_flags);
	event.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &event, sizeof(event),
		   sk);
}
4504 
/* MGMT_OP_SET_DEVICE_FLAGS: update the per-device flags of an
 * accept-listed BR/EDR device or an LE device with stored connection
 * parameters. Flags outside the supported mask are rejected, and
 * MGMT_EV_DEVICE_FLAGS_CHANGED is emitted (to everyone but the caller)
 * on success.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	/* current_flags is already CPU-endian here; converting it a second
	 * time (as before) would print a byte-swapped value on big-endian
	 * hosts.
	 */
	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): hdev->conn_flags is read before hci_dev_lock() is
	 * taken - confirm it cannot change concurrently here.
	 */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* Re-check against the (possibly narrower) per-device mask */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	params->flags = current_flags;
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
4582 
/* Notify all management sockets except @sk that monitor @handle was
 * added.
 */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added added = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &added, sizeof(added), sk);
}
4592 
/* Broadcast MGMT_EV_ADV_MONITOR_REMOVED for @handle.
 *
 * If a Remove Adv Monitor command for a specific (non-zero) handle is
 * pending, that command's socket is skipped - it gets the command
 * completion instead of the event.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* Non-zero handle: single-monitor removal is pending */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
4612 
/* MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported/enabled monitor
 * features, the handle limits, and the currently registered handles.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Collect registered handles, bounded by the fixed-size stack
	 * array so an over-populated idr cannot overflow it.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
		if (num_handles >= HCI_MAX_ADV_MONITOR_NUM_HANDLES)
			break;
		handles[num_handles++] = monitor->handle;
	}

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4661 
/* Deferred completion for the Add Adv Patterns Monitor commands.
 * Finds the pending command, registers the monitor on success, and
 * replies with the monitor handle.
 */
int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
{
	/* Initialize so the debug print at "done" does not read an
	 * uninitialized handle when no command is pending.
	 */
	struct mgmt_rp_add_adv_patterns_monitor rp = { .monitor_handle = 0 };
	struct mgmt_pending_cmd *cmd;
	struct adv_monitor *monitor;
	int err = 0;

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
	if (!cmd) {
		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
		if (!cmd)
			goto done;
	}

	monitor = cmd->user_data;
	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %u",
		   rp.monitor_handle, status);

	return err;
}
4700 
/* Common tail of the Add Adv Patterns Monitor commands.
 *
 * Takes ownership of @m: on every failure path (including a non-zero
 * @status handed in by the caller) the monitor is freed here. On success
 * the response is either sent immediately (no controller interaction
 * required) or deferred to mgmt_add_adv_patterns_monitor_complete().
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd;
	int err;
	bool pending;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	/* Only one monitor/LE-state operation may be in flight at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	pending = hci_add_adv_monitor(hdev, m, &err);
	if (err) {
		if (err == -ENOSPC || err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else if (err == -EINVAL)
			status = MGMT_STATUS_INVALID_PARAMS;
		else
			status = MGMT_STATUS_FAILED;

		/* Drop the pending command: leaving it in place would keep
		 * cmd->user_data pointing at the monitor freed below and
		 * make all subsequent monitor commands return "busy".
		 */
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	if (!pending) {
		/* Registered without a controller round-trip: reply now */
		mgmt_pending_remove(cmd);
		rp.monitor_handle = cpu_to_le16(m->handle);
		mgmt_adv_monitor_added(sk, hdev, m->handle);
		m->state = ADV_MONITOR_STATE_REGISTERED;
		hdev->adv_monitors_cnt++;

		hci_dev_unlock(hdev);
		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
4763 
parse_adv_monitor_rssi(struct adv_monitor * m,struct mgmt_adv_rssi_thresholds * rssi)4764 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4765 				   struct mgmt_adv_rssi_thresholds *rssi)
4766 {
4767 	if (rssi) {
4768 		m->rssi.low_threshold = rssi->low_threshold;
4769 		m->rssi.low_threshold_timeout =
4770 		    __le16_to_cpu(rssi->low_threshold_timeout);
4771 		m->rssi.high_threshold = rssi->high_threshold;
4772 		m->rssi.high_threshold_timeout =
4773 		    __le16_to_cpu(rssi->high_threshold_timeout);
4774 		m->rssi.sampling_period = rssi->sampling_period;
4775 	} else {
4776 		/* Default values. These numbers are the least constricting
4777 		 * parameters for MSFT API to work, so it behaves as if there
4778 		 * are no rssi parameter to consider. May need to be changed
4779 		 * if other API are to be supported.
4780 		 */
4781 		m->rssi.low_threshold = -127;
4782 		m->rssi.low_threshold_timeout = 60;
4783 		m->rssi.high_threshold = -127;
4784 		m->rssi.high_threshold_timeout = 0;
4785 		m->rssi.sampling_period = 0;
4786 	}
4787 }
4788 
/* Copy @pattern_count patterns from @patterns into monitor @m.
 *
 * Each pattern's offset and length must fit inside HCI_MAX_AD_LENGTH
 * bytes of advertising data. On failure, patterns added so far remain on
 * m->patterns; the caller releases them via hci_free_adv_monitor()
 * (see __add_adv_patterns_monitor()).
 *
 * Returns an MGMT_STATUS_* code.
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	u8 offset = 0, length = 0;
	struct adv_pattern *p = NULL;
	int i;

	for (i = 0; i < pattern_count; i++) {
		offset = patterns[i].offset;
		length = patterns[i].length;
		if (offset >= HCI_MAX_AD_LENGTH ||
		    length > HCI_MAX_AD_LENGTH ||
		    (offset + length) > HCI_MAX_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = patterns[i].ad_type;
		p->offset = patterns[i].offset;
		p->length = patterns[i].length;
		/* p->value is HCI_MAX_AD_LENGTH bytes; length was bounded
		 * above.
		 */
		memcpy(p->value, patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
4819 
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR: build a monitor with default RSSI
 * parameters and the supplied patterns, then hand it (together with any
 * parse status) to __add_adv_patterns_monitor(), which sends the reply
 * and frees the monitor on failure.
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* Need at least one pattern beyond the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* NULL rssi selects the default thresholds */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
4856 
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI: like add_adv_patterns_monitor()
 * but with caller-supplied RSSI thresholds instead of the defaults.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* Need at least one pattern beyond the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
4893 
/* Deferred completion for MGMT_OP_REMOVE_ADV_MONITOR: reply to the
 * pending command and refresh passive scanning on success.
 */
int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
{
	/* Initialize so the debug print at "done" does not read an
	 * uninitialized handle when no command is pending.
	 */
	struct mgmt_rp_remove_adv_monitor rp = { .monitor_handle = 0 };
	struct mgmt_cp_remove_adv_monitor *cp;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (!cmd)
		goto done;

	cp = cmd->param;
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
		   rp.monitor_handle, status);

	return err;
}
4924 
/* MGMT_OP_REMOVE_ADV_MONITOR: remove one monitor (non-zero handle) or
 * all monitors (handle 0). Completion is either synchronous (no
 * controller round-trip needed) or deferred to
 * mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	int err, status;
	bool pending;

	BT_DBG("request for %s", hdev->name);
	rp.monitor_handle = cp->monitor_handle;

	hci_dev_lock(hdev);

	/* Only one monitor/LE-state operation may be in flight at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	if (handle)
		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
	else
		pending = hci_remove_all_adv_monitor(hdev, &err);

	if (err) {
		mgmt_pending_remove(cmd);

		/* -ENOENT: no such monitor handle */
		if (err == -ENOENT)
			status = MGMT_STATUS_INVALID_INDEX;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* monitor can be removed without forwarding request to controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		hci_dev_unlock(hdev);

		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_ADV_MONITOR,
					 MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
4989 
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_DATA (hci_cmd_sync).
 *
 * cmd->skb holds the controller's reply, or NULL/ERR_PTR on failure.
 * Controllers without BR/EDR Secure Connections return only the 192-bit
 * hash/randomizer; the response size is shrunk accordingly.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Derive status from the skb when the request itself succeeded;
	 * the first reply byte is the HCI status code.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Drop the unused 256-bit fields from the response */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5056 
read_local_oob_data_sync(struct hci_dev * hdev,void * data)5057 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5058 {
5059 	struct mgmt_pending_cmd *cmd = data;
5060 
5061 	if (bredr_sc_enabled(hdev))
5062 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5063 	else
5064 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5065 
5066 	if (IS_ERR(cmd->skb))
5067 		return PTR_ERR(cmd->skb);
5068 	else
5069 		return 0;
5070 }
5071 
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: queue a request for the
 * controller's local out-of-band pairing data. Requires the adapter to
 * be powered and SSP capable; the reply is sent from
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* mgmt_pending_new() (not _add) keeps the command off the
	 * pending list; ownership passes to the sync callback chain.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5113 
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store out-of-band pairing data
 * received from a remote device over an external channel (e.g. NFC).
 * The command comes in two sizes: the legacy form carrying only P-192
 * hash/randomizer (BR/EDR only), and the extended form that also
 * carries the P-256 values. Zero-valued keys disable the respective
 * OOB data set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy form is only defined for BR/EDR addresses */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither recognized command size: reject */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5221 
/* Handle MGMT_OP_REMOVE_REMOTE_OOB_DATA: delete stored remote OOB
 * pairing data for a single BR/EDR address, or for all devices when the
 * wildcard address BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Remote OOB data is only stored for BR/EDR addresses */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Wildcard address: drop every stored OOB entry */
		hci_remote_oob_data_clear(hdev);
	} else if (hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type) < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
5258 
/* Notify the pending Start Discovery command (any of the three
 * variants, searched in the same order as before) of its final status
 * and drop it from the pending list.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	static const u16 start_ops[] = {
		MGMT_OP_START_DISCOVERY,
		MGMT_OP_START_SERVICE_DISCOVERY,
		MGMT_OP_START_LIMITED_DISCOVERY,
	};
	struct mgmt_pending_cmd *cmd = NULL;
	unsigned int i;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	for (i = 0; !cmd && i < ARRAY_SIZE(start_ops); i++)
		cmd = pending_find(start_ops[i], hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
5281 
discovery_type_is_valid(struct hci_dev * hdev,uint8_t type,uint8_t * mgmt_status)5282 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5283 				    uint8_t *mgmt_status)
5284 {
5285 	switch (type) {
5286 	case DISCOV_TYPE_LE:
5287 		*mgmt_status = mgmt_le_support(hdev);
5288 		if (*mgmt_status)
5289 			return false;
5290 		break;
5291 	case DISCOV_TYPE_INTERLEAVED:
5292 		*mgmt_status = mgmt_le_support(hdev);
5293 		if (*mgmt_status)
5294 			return false;
5295 		fallthrough;
5296 	case DISCOV_TYPE_BREDR:
5297 		*mgmt_status = mgmt_bredr_support(hdev);
5298 		if (*mgmt_status)
5299 			return false;
5300 		break;
5301 	default:
5302 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5303 		return false;
5304 	}
5305 
5306 	return true;
5307 }
5308 
/* hci_cmd_sync completion for the Start Discovery family of commands.
 *
 * Bails out when @data is no longer the pending command for any of the
 * three discovery opcodes (e.g. it was already cancelled); otherwise
 * replies to userspace and moves the discovery state machine to
 * FINDING on success or back to STOPPED on failure.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* The reply carries the discovery type: the first parameter
	 * byte of the original command.
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5327 
/* hci_cmd_sync callback: kick off discovery using the parameters
 * already recorded in hdev->discovery.
 */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5332 
/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects the variant).
 * Validates power/discovery state and the requested type, records the
 * parameters in hdev->discovery and queues start_discovery_sync();
 * start_discovery_complete() sends the reply.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery session may run at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5403 
/* Handle MGMT_OP_START_DISCOVERY (general discovery). */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5410 
/* Handle MGMT_OP_START_LIMITED_DISCOVERY (limited discovery mode). */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5418 
/* Handle MGMT_OP_START_SERVICE_DISCOVERY: like Start Discovery but
 * with result filtering by RSSI threshold and/or a variable-length
 * list of 128-bit service UUIDs appended to the command. The UUID
 * list length is validated against the actual command length before
 * anything is copied.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound that keeps expected_len from overflowing u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery session may run at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command must carry exactly uuid_count 16-byte UUIDs */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5530 
/* Notify the pending Stop Discovery command (if any) of its final
 * status and drop it from the pending list.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5547 
/* hci_cmd_sync completion for MGMT_OP_STOP_DISCOVERY. Skips stale
 * callbacks (cmd no longer the pending command), replies with the
 * discovery type byte and, on success, marks discovery as stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* The reply carries the discovery type: the first parameter
	 * byte of the original command.
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
5564 
/* hci_cmd_sync callback: stop any active discovery procedure. */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
5569 
/* Handle MGMT_OP_STOP_DISCOVERY: abort the currently running discovery
 * session. The requested type must match the one discovery was started
 * with; the reply is sent from stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type must match what discovery was started with */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5614 
/* Handle MGMT_OP_CONFIRM_NAME: userspace's answer to whether a
 * discovered device's name is already known. Known names drop the
 * inquiry cache entry from the resolve list; unknown ones are queued
 * for remote name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *entry;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto done;
	}

	entry = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!entry) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto done;
	}

	if (!cp->name_known) {
		entry->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, entry);
	} else {
		entry->name_state = NAME_KNOWN;
		list_del(&entry->list);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

done:
	hci_dev_unlock(hdev);
	return err;
}
5656 
/* Handle MGMT_OP_BLOCK_DEVICE: add the address to the reject list so
 * connections from it are refused. Emits MGMT_EV_DEVICE_BLOCKED (to
 * all sockets except the requester) on success.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0)
		status = MGMT_STATUS_FAILED;
	else
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
5692 
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove the address from the reject
 * list again. Emits MGMT_EV_DEVICE_UNBLOCKED on success; an address
 * that was never blocked yields INVALID_PARAMS.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
5728 
/* hci_cmd_sync callback: regenerate the EIR data so it picks up the
 * updated Device ID record.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
5733 
/* Handle MGMT_OP_SET_DEVICE_ID: store the DID (source/vendor/product/
 * version) values and schedule an EIR update so they get advertised.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	__u16 src = __le16_to_cpu(cp->source);
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x0000 (disabled), 0x0001 (SIG) and 0x0002 (USB IF) are
	 * valid ID sources.
	 */
	if (src > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = src;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Refresh EIR so the new Device ID record is visible */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
5765 
/* Log the outcome of re-enabling an advertising instance after "Set
 * Advertising" was turned off again.
 */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
5773 
/* Completion for MGMT_OP_SET_ADVERTISING. Propagates the result to all
 * pending Set Advertising commands, syncs the HCI_ADVERTISING flag
 * with the actual controller state (HCI_LE_ADV) and, when the setting
 * was just disabled, re-enables any configured advertising instances.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first matching socket */
	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
5821 
/* hci_cmd_sync callback for MGMT_OP_SET_ADVERTISING. Applies the
 * requested mode: 0x00 disables advertising, 0x01 enables it, 0x02
 * enables it as connectable.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	/* Stop any pending instance-advertising timeout before
	 * reconfiguring.
	 */
	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
5855 
/* Handle MGMT_OP_SET_ADVERTISING: 0x00 disables, 0x01 enables and 0x02
 * enables connectable advertising. When no HCI traffic is required the
 * flags are toggled directly and the reply sent immediately; otherwise
 * set_adv_sync() is queued and set_advertising_complete() replies.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Refuse changes while advertising is suspended/paused */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast New Settings only when a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5939 
/* Handle MGMT_OP_SET_STATIC_ADDRESS: configure the random static
 * address used for LE. Only allowed while the adapter is powered off;
 * BDADDR_ANY (all zeroes) clears the static address again.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		/* All-ones (BDADDR_NONE) is never a valid address */
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5983 
/* Handle MGMT_OP_SET_SCAN_PARAMS: store the LE scan interval and
 * window and, if a background scan is currently running, restart it so
 * the new values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);
	window = __le16_to_cpu(cp->window);

	/* Both values are limited to the range 0x0004-0x4000 and the
	 * scan window must not exceed the scan interval.
	 */
	if (interval < 0x0004 || interval > 0x4000 ||
	    window < 0x0004 || window > 0x4000 ||
	    window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6032 
/* Completion for MGMT_OP_SET_FAST_CONNECTABLE: on success sync the
 * HCI_FAST_CONNECTABLE flag with the requested value and notify
 * userspace of the new settings; on failure just report the error.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6056 
/* hci_cmd_sync callback for MGMT_OP_SET_FAST_CONNECTABLE: write the
 * requested fast connectable mode to the controller.
 */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}
6064 
/* MGMT Set Fast Connectable command handler.
 *
 * Requires BR/EDR to be enabled on a controller of at least Bluetooth
 * 1.2. When the device is powered, the mode is written to the
 * controller via the cmd_sync machinery; otherwise only the setting
 * flag is toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *pending;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No state change requested: just confirm the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off only the flag needs flipping */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	pending = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
				   data, len);
	if (pending)
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync,
					 pending, fast_connectable_complete);
	else
		err = -ENOMEM;

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (pending)
			mgmt_pending_free(pending);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6120 
set_bredr_complete(struct hci_dev * hdev,void * data,int err)6121 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6122 {
6123 	struct mgmt_pending_cmd *cmd = data;
6124 
6125 	bt_dev_dbg(hdev, "err %d", err);
6126 
6127 	if (err) {
6128 		u8 mgmt_err = mgmt_status(err);
6129 
6130 		/* We need to restore the flag if related HCI commands
6131 		 * failed.
6132 		 */
6133 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6134 
6135 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6136 	} else {
6137 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6138 		new_settings(hdev, cmd->sk);
6139 	}
6140 
6141 	mgmt_pending_free(cmd);
6142 }
6143 
set_bredr_sync(struct hci_dev * hdev,void * data)6144 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6145 {
6146 	int status;
6147 
6148 	status = hci_write_fast_connectable_sync(hdev, false);
6149 
6150 	if (!status)
6151 		status = hci_update_scan_sync(hdev);
6152 
6153 	/* Since only the advertising data flags will change, there
6154 	 * is no need to update the scan response data.
6155 	 */
6156 	if (!status)
6157 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6158 
6159 	return status;
6160 }
6161 
/* MGMT Set BR/EDR command handler.
 *
 * Enables or disables BR/EDR support on a dual-mode controller. LE
 * must currently be enabled. While powered on, disabling BR/EDR is
 * rejected, and re-enabling it is rejected when a static address or
 * secure connections would make the resulting configuration invalid.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only meaningful on dual-mode (BR/EDR + LE) controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just confirm the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Powered off: flags can be changed without talking to
		 * the controller. Disabling BR/EDR also clears all
		 * settings that only make sense with BR/EDR enabled.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6262 
set_secure_conn_complete(struct hci_dev * hdev,void * data,int err)6263 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6264 {
6265 	struct mgmt_pending_cmd *cmd = data;
6266 	struct mgmt_mode *cp;
6267 
6268 	bt_dev_dbg(hdev, "err %d", err);
6269 
6270 	if (err) {
6271 		u8 mgmt_err = mgmt_status(err);
6272 
6273 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6274 		goto done;
6275 	}
6276 
6277 	cp = cmd->param;
6278 
6279 	switch (cp->val) {
6280 	case 0x00:
6281 		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6282 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6283 		break;
6284 	case 0x01:
6285 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6286 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6287 		break;
6288 	case 0x02:
6289 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6290 		hci_dev_set_flag(hdev, HCI_SC_ONLY);
6291 		break;
6292 	}
6293 
6294 	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6295 	new_settings(hdev, cmd->sk);
6296 
6297 done:
6298 	mgmt_pending_free(cmd);
6299 }
6300 
/* hci_cmd_sync callback for MGMT_OP_SET_SECURE_CONN.
 *
 * NOTE(review): HCI_SC_ENABLED is set up front so the write below is
 * not short-circuited — presumably hci_write_sc_support_sync() checks
 * this flag; confirm against hci_sync.c.
 */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;	/* 0x02 (SC only) still writes 0x01 */

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}
6312 
/* MGMT Set Secure Connections command handler.
 *
 * cp->val selects the mode: 0x00 disabled, 0x01 enabled, 0x02
 * "SC only". If the device is unpowered, lacks controller SC support
 * or has BR/EDR disabled, only the host flags are updated; otherwise
 * the controller's SC support is written via the cmd_sync machinery.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC needs either controller support or LE enabled (host-side) */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC-capable BR/EDR controller, SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No controller interaction needed: just toggle the host flags */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do if both flags already match the requested mode */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6393 
/* MGMT Set Debug Keys command handler.
 *
 * cp->val: 0x00 discard debug keys, 0x01 keep them, 0x02 keep them and
 * also put an SSP-enabled controller into debug key mode.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool keep, use;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	keep = (cp->val != 0x00);
	use = (cp->val == 0x02);

	hci_dev_lock(hdev);

	changed = keep ? !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS) :
			 hci_dev_test_and_clear_flag(hdev,
						     HCI_KEEP_DEBUG_KEYS);

	use_changed = use ? !hci_dev_test_and_set_flag(hdev,
						       HCI_USE_DEBUG_KEYS) :
			    hci_dev_test_and_clear_flag(hdev,
							HCI_USE_DEBUG_KEYS);

	/* Tell a powered, SSP-enabled controller when the debug key mode
	 * actually changes.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = use ? 0x01 : 0x00;

		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6440 
/* MGMT Set Privacy command handler.
 *
 * cp->privacy: 0x00 disabled, 0x01 privacy enabled, 0x02 limited
 * privacy. Only allowed while the device is powered off; the IRK is
 * taken from (or cleared in) cp->irk.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool settings_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (!cp->privacy) {
		settings_changed = hci_dev_test_and_clear_flag(hdev,
							       HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		settings_changed = !hci_dev_test_and_set_flag(hdev,
							      HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		/* 0x02 selects limited privacy mode */
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (settings_changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6497 
irk_is_valid(struct mgmt_irk_info * irk)6498 static bool irk_is_valid(struct mgmt_irk_info *irk)
6499 {
6500 	switch (irk->addr.type) {
6501 	case BDADDR_LE_PUBLIC:
6502 		return true;
6503 
6504 	case BDADDR_LE_RANDOM:
6505 		/* Two most significant bits shall be set */
6506 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6507 			return false;
6508 		return true;
6509 	}
6510 
6511 	return false;
6512 }
6513 
/* MGMT Load IRKs command handler.
 *
 * Validates the list length and every entry, then replaces the whole
 * SMP IRK store with the supplied keys, skipping any blocked ones.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on entries representable in a 16-bit length */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, irks, irk_count);
	if (len != expected_len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Reject the whole request if any single entry is malformed */
	for (i = 0; i < irk_count; i++) {
		if (!irk_is_valid(&cp->irks[i]))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6584 
ltk_is_valid(struct mgmt_ltk_info * key)6585 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6586 {
6587 	if (key->initiator != 0x00 && key->initiator != 0x01)
6588 		return false;
6589 
6590 	switch (key->addr.type) {
6591 	case BDADDR_LE_PUBLIC:
6592 		return true;
6593 
6594 	case BDADDR_LE_RANDOM:
6595 		/* Two most significant bits shall be set */
6596 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6597 			return false;
6598 		return true;
6599 	}
6600 
6601 	return false;
6602 }
6603 
/* MGMT Load Long Term Keys command handler.
 *
 * Validates the key list length and every entry before clearing the
 * existing SMP LTK store and loading the new keys. Blocked keys,
 * P-256 debug keys and unknown key types are skipped rather than
 * stored.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound on entries representable in a 16-bit length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Reject the whole load if any single entry is malformed, before
	 * the existing keys are cleared below.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type to SMP key type and security level */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* The fall through into the default (skip) case is
			 * deliberate: debug keys are never stored.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6699 
/* Completion callback for MGMT_OP_GET_CONN_INFO.
 *
 * On success the reply carries the values cached in the hci_conn by
 * get_conn_info_sync(); on failure the invalid marker values are
 * reported. Drops the connection reference taken when the request was
 * queued (conn may already be NULL if the sync callback found the
 * connection gone and released it).
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	/* Copies both bdaddr and type in one go: bdaddr is the first
	 * member of the addr info and sizeof(rp.addr) covers both.
	 */
	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	mgmt_pending_free(cmd);
}
6733 
/* hci_cmd_sync callback for MGMT_OP_GET_CONN_INFO.
 *
 * Re-checks that the connection is still up, then refreshes the RSSI
 * and, when needed, the TX power values. Returns a negative errno from
 * the HCI helpers, or the positive MGMT_STATUS_NOT_CONNECTED code when
 * the connection vanished (the completion path feeds the value through
 * mgmt_status()).
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
		/* Release the reference taken in get_conn_info() and clear
		 * user_data so the completion callback does not drop it a
		 * second time.
		 */
		if (cmd->user_data) {
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
		return MGMT_STATUS_NOT_CONNECTED;
	}

	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
6776 
/* MGMT Get Connection Information command handler.
 *
 * Replies immediately from the values cached in the hci_conn when they
 * are recent enough; otherwise queues get_conn_info_sync() to refresh
 * them from the controller. The cache lifetime is randomized between
 * hdev->conn_info_min_age and hdev->conn_info_max_age.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			/* Hold the connection for the duration of the
			 * request; released in get_conn_info_complete() or,
			 * on early disconnect, in get_conn_info_sync().
			 */
			hci_conn_hold(conn);
			cmd->user_data = hci_conn_get(conn);
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			/* Queueing failed: the completion callback will not
			 * run, so release the reference taken above before
			 * freeing the pending command.
			 */
			if (cmd) {
				hci_conn_drop(conn);
				hci_conn_put(cmd->user_data);
				mgmt_pending_free(cmd);
			}

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6870 
get_clock_info_complete(struct hci_dev * hdev,void * data,int err)6871 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
6872 {
6873 	struct mgmt_pending_cmd *cmd = data;
6874 	struct mgmt_cp_get_clock_info *cp = cmd->param;
6875 	struct mgmt_rp_get_clock_info rp;
6876 	struct hci_conn *conn = cmd->user_data;
6877 	u8 status = mgmt_status(err);
6878 
6879 	bt_dev_dbg(hdev, "err %d", err);
6880 
6881 	memset(&rp, 0, sizeof(rp));
6882 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6883 	rp.addr.type = cp->addr.type;
6884 
6885 	if (err)
6886 		goto complete;
6887 
6888 	rp.local_clock = cpu_to_le32(hdev->clock);
6889 
6890 	if (conn) {
6891 		rp.piconet_clock = cpu_to_le32(conn->clock);
6892 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6893 		hci_conn_drop(conn);
6894 		hci_conn_put(conn);
6895 	}
6896 
6897 complete:
6898 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6899 			  sizeof(rp));
6900 
6901 	mgmt_pending_free(cmd);
6902 }
6903 
/* hci_cmd_sync callback for MGMT_OP_GET_CLOCK_INFO.
 *
 * Always reads the local clock first; if a connection was tracked and
 * still exists, also reads its piconet clock. Releases the connection
 * reference (and clears user_data) if the connection has gone away.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	/* Zeroed hci_cp means handle 0x0000 and which 0x00 (local clock) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	err = hci_read_clock_sync(hdev, &hci_cp);

	if (conn) {
		/* Make sure connection still exists */
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);

		if (conn && conn == cmd->user_data &&
		    conn->state == BT_CONNECTED) {
			hci_cp.handle = cpu_to_le16(conn->handle);
			hci_cp.which = 0x01; /* Piconet clock */
			err = hci_read_clock_sync(hdev, &hci_cp);
		} else if (cmd->user_data) {
			/* Connection vanished: drop the reference here so the
			 * completion callback does not report stale data.
			 */
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
	}

	return err;
}
6934 
/* MGMT Get Clock Information command handler.
 *
 * Reads the local clock and, when a peer address is given, the piconet
 * clock of the matching BR/EDR connection. The HCI reads happen in
 * get_clock_info_sync(); the reply is sent by get_clock_info_complete().
 *
 * Fix: the connection reference must be stored in cmd->user_data
 * BEFORE hci_cmd_sync_queue(), since the queued work may run (and read
 * cmd->user_data) concurrently with this function; the original code
 * assigned it only after a successful queue, racing with the sync
 * callback.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only available for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-zero address requests the piconet clock of that link */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
	} else {
		/* Take the connection reference before queueing so that
		 * get_clock_info_sync() can never observe a
		 * half-initialized cmd->user_data.
		 */
		if (conn) {
			hci_conn_hold(conn);
			cmd->user_data = hci_conn_get(conn);
		}

		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);
	}

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd) {
			/* Queueing failed: the completion callback will not
			 * run, so release the reference taken above.
			 */
			if (cmd->user_data) {
				hci_conn_drop(cmd->user_data);
				hci_conn_put(cmd->user_data);
			}
			mgmt_pending_free(cmd);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7002 
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)7003 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7004 {
7005 	struct hci_conn *conn;
7006 
7007 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7008 	if (!conn)
7009 		return false;
7010 
7011 	if (conn->dst_type != type)
7012 		return false;
7013 
7014 	if (conn->state != BT_CONNECTED)
7015 		return false;
7016 
7017 	return true;
7018 }
7019 
7020 /* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* May return an already existing entry for this address; its
	 * current auto_connect is compared below.
	 */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Remove from the current action list before re-filing below */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* A pending explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connect attempt when not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7064 
/* Emit a Device Added management event for @bdaddr; @sk identifies the
 * socket that issued the originating command.
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	ev.action = action;
	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
7076 
/* Runs in hci_cmd_sync context: re-program passive scanning so the
 * newly added device's auto-connect/report settings take effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7081 
/* Handle the Add Device management command.
 *
 * For BR/EDR addresses only action 0x01 (allow incoming connection) is
 * supported and the device is placed on the controller accept list.
 * For LE addresses the action selects the auto-connect policy:
 * 0x00 -> background report, 0x01 -> direct connect, 0x02 -> always
 * auto-connect.  Responds with the supplied address on both success
 * and failure.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject unknown address types and the wildcard address */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only the three defined action values are accepted */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Refresh page scan so the device can actually connect */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Report the entry's current flags in the Device Flags
		 * Changed event emitted below.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	/* Re-program passive scanning asynchronously */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7183 
/* Emit a Device Removed management event for @bdaddr; @sk identifies
 * the socket that issued the originating command.
 */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
7194 
/* Runs in hci_cmd_sync context: re-program passive scanning after a
 * device (or all devices) has been removed.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7199 
/* Handle the Remove Device management command.
 *
 * A specific address removes that one entry (accept list for BR/EDR,
 * conn_params for LE).  BDADDR_ANY with address type 0 clears the whole
 * accept list and every non-disabled LE conn_params entry.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Remove a single device */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries in these states were not created by Add Device,
		 * so Remove Device must not delete them.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* Remove all devices: address type must be 0 here */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a pending explicit connect; just
			 * demote them so they are no longer auto-connected.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	/* Re-program passive scanning to match the updated lists */
	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7327 
/* Handle the Load Connection Parameters management command.
 *
 * Replaces the stored LE connection parameters: disabled entries are
 * cleared first, then each supplied parameter set is validated and
 * stored.  Invalid entries are logged and skipped rather than failing
 * the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps struct_size() below within u16 range */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must exactly match the payload length */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE address types make sense for conn params */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7412 
/* Handle the Set External Configuration management command.
 *
 * Only allowed while the controller is powered off and only when the
 * controller advertises HCI_QUIRK_EXTERNAL_CONFIG.  Toggling the flag
 * may move the controller between the configured and unconfigured
 * index lists, which is reflected via index removed/added events.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* 'changed' is true only if the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state no longer matches the HCI_UNCONFIGURED
	 * flag, migrate the controller to the other index list.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			/* Power-on work re-announces the index when done */
			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7468 
/* Handle the Set Public Address management command.
 *
 * Stores the address to be programmed by the driver's set_bdaddr hook.
 * Only allowed while powered off and when the driver provides the
 * hook.  If this completes the configuration, the controller is moved
 * from the unconfigured to the configured index list and powered on.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* The wildcard address cannot be a public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* Configuration is now complete: re-register the index as
		 * configured and trigger power-on.
		 */
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7520 
/* Completion handler for Read Local OOB Extended Data (BR/EDR case).
 *
 * Translates the controller's OOB reply (legacy or Secure Connections
 * variant) into an EIR blob, sends the command response and, on
 * success, broadcasts a Local OOB Data Updated event.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if this pending command was already cancelled */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* Derive a status from the reply skb when the request itself
	 * succeeded: missing skb, error pointer, or the HCI status byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status has already been through
		 * mgmt_status() above; this second mapping looks redundant —
		 * confirm against mgmt_status() semantics.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy (P-192 only) OOB reply */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* Class of Device + hash + randomizer fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections OOB reply (P-192 and P-256) */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only: omit the P-192 values */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* eir_len == 0 implies a failure status; the hash pointers are
	 * not consulted in that case.
	 */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7643 
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)7644 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7645 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7646 {
7647 	struct mgmt_pending_cmd *cmd;
7648 	int err;
7649 
7650 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7651 			       cp, sizeof(*cp));
7652 	if (!cmd)
7653 		return -ENOMEM;
7654 
7655 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7656 				 read_local_oob_ext_data_complete);
7657 
7658 	if (err < 0) {
7659 		mgmt_pending_remove(cmd);
7660 		return err;
7661 	}
7662 
7663 	return 0;
7664 }
7665 
/* Handle the Read Local OOB Extended Data management command.
 *
 * For the BR/EDR type the controller has to be queried asynchronously
 * (read_local_ssp_oob_req); for the LE type the EIR blob is assembled
 * synchronously from local state (address, role, SC confirm/random,
 * flags).  On success the data is also broadcast as a Local OOB Data
 * Updated event.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Work out the worst-case EIR length for the requested type so
	 * the response buffer can be sized up front.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Controller query needed; response is sent from
			 * the completion handler on success.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] marks the address type: 0x01 = random (static),
		 * 0x00 = public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7826 
get_supported_adv_flags(struct hci_dev * hdev)7827 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7828 {
7829 	u32 flags = 0;
7830 
7831 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7832 	flags |= MGMT_ADV_FLAG_DISCOV;
7833 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7834 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7835 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7836 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7837 	flags |= MGMT_ADV_PARAM_DURATION;
7838 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7839 	flags |= MGMT_ADV_PARAM_INTERVALS;
7840 	flags |= MGMT_ADV_PARAM_TX_POWER;
7841 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
7842 
7843 	/* In extended adv TX_POWER returned from Set Adv Param
7844 	 * will be always valid.
7845 	 */
7846 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7847 	    ext_adv_capable(hdev))
7848 		flags |= MGMT_ADV_FLAG_TX_POWER;
7849 
7850 	if (ext_adv_capable(hdev)) {
7851 		flags |= MGMT_ADV_FLAG_SEC_1M;
7852 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7853 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7854 
7855 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7856 			flags |= MGMT_ADV_FLAG_SEC_2M;
7857 
7858 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7859 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7860 	}
7861 
7862 	return flags;
7863 }
7864 
/* Handle the Read Advertising Features management command.
 *
 * Returns the supported flags, the data-length limits, the maximum
 * number of instances and the list of currently registered instance
 * identifiers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per registered instance identifier */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Fill in the identifier of each registered instance */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
7913 
/* Number of bytes the local name occupies when encoded as an EIR
 * field (including the length and type bytes), computed by encoding
 * it into a scratch buffer.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];

	return eir_append_local_name(hdev, buf, 0);
}
7920 
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)7921 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7922 			   bool is_adv_data)
7923 {
7924 	u8 max_len = HCI_MAX_AD_LENGTH;
7925 
7926 	if (is_adv_data) {
7927 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7928 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7929 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7930 			max_len -= 3;
7931 
7932 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7933 			max_len -= 3;
7934 	} else {
7935 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7936 			max_len -= calculate_name_len(hdev);
7937 
7938 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7939 			max_len -= 4;
7940 	}
7941 
7942 	return max_len;
7943 }
7944 
flags_managed(u32 adv_flags)7945 static bool flags_managed(u32 adv_flags)
7946 {
7947 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7948 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7949 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7950 }
7951 
tx_power_managed(u32 adv_flags)7952 static bool tx_power_managed(u32 adv_flags)
7953 {
7954 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7955 }
7956 
name_managed(u32 adv_flags)7957 static bool name_managed(u32 adv_flags)
7958 {
7959 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7960 }
7961 
appearance_managed(u32 adv_flags)7962 static bool appearance_managed(u32 adv_flags)
7963 {
7964 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7965 }
7966 
/* Validate user-supplied advertising/scan-response TLV data.
 *
 * Rejects data that exceeds the space left after kernel-managed fields
 * (tlv_data_max_len), contains a field whose declared length overruns
 * the buffer, or contains a field type the kernel manages itself.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.  Checked before touching
		 * data[i + 1]: the previous ordering read one byte past
		 * the buffer for a truncated final field (cur_len > 0 at
		 * i == len - 1).
		 */
		if (i + cur_len >= len)
			return false;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;
	}

	return true;
}
8011 
requested_adv_flags_are_valid(struct hci_dev * hdev,u32 adv_flags)8012 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8013 {
8014 	u32 supported_flags, phy_flags;
8015 
8016 	/* The current implementation only supports a subset of the specified
8017 	 * flags. Also need to check mutual exclusiveness of sec flags.
8018 	 */
8019 	supported_flags = get_supported_adv_flags(hdev);
8020 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8021 	if (adv_flags & ~supported_flags ||
8022 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8023 		return false;
8024 
8025 	return true;
8026 }
8027 
adv_busy(struct hci_dev * hdev)8028 static bool adv_busy(struct hci_dev *hdev)
8029 {
8030 	return pending_find(MGMT_OP_SET_LE, hdev);
8031 }
8032 
/* Finalize pending advertising instances after programming finished.
 *
 * On success every pending instance is marked programmed; on failure
 * each pending instance is torn down and a removal event is sent to
 * everyone but @sk.  The @instance argument is currently unused; the
 * instances acted on are taken from the pending list.
 */
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		/* Renamed from 'instance': the old name shadowed the
		 * function parameter.
		 */
		u8 rem_inst;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		rem_inst = adv->instance;

		/* Stop the rotation timer before removing the instance it
		 * is currently driving.
		 */
		if (hdev->cur_adv_instance == rem_inst)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, rem_inst);
		mgmt_advertising_removed(sk, hdev, rem_inst);
	}

	hci_dev_unlock(hdev);
}
8064 
/* Completion callback for Add Advertising: answer the command (with
 * the instance number on success, a bare status on failure), clean up
 * pending instances via add_adv_complete() and release the pending
 * command entry.
 */
static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	mgmt_pending_free(cmd);
}
8086 
add_advertising_sync(struct hci_dev * hdev,void * data)8087 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8088 {
8089 	struct mgmt_pending_cmd *cmd = data;
8090 	struct mgmt_cp_add_advertising *cp = cmd->param;
8091 
8092 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8093 }
8094 
/* MGMT_OP_ADD_ADVERTISING handler: register (or replace) advertising
 * instance cp->instance with the supplied flags, advertising data and
 * scan response data, then schedule it for transmission when possible.
 *
 * Returns a negative errno on internal failure; otherwise the return
 * value of the mgmt status/complete response sent back to @sk.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1-based and bounded by the controller's
	 * number of supported advertising sets.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length payload must match the declared adv_data
	 * and scan_rsp lengths exactly.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout is only meaningful while powered, since it is driven
	 * from the moment advertising actually starts.
	 */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Validate both the advertising data and scan response TLVs */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* The instance that actually gets scheduled may differ from the
	 * one just added (software rotation picked the next one).
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8229 
add_ext_adv_params_complete(struct hci_dev * hdev,void * data,int err)8230 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8231 					int err)
8232 {
8233 	struct mgmt_pending_cmd *cmd = data;
8234 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8235 	struct mgmt_rp_add_ext_adv_params rp;
8236 	struct adv_info *adv;
8237 	u32 flags;
8238 
8239 	BT_DBG("%s", hdev->name);
8240 
8241 	hci_dev_lock(hdev);
8242 
8243 	adv = hci_find_adv_instance(hdev, cp->instance);
8244 	if (!adv)
8245 		goto unlock;
8246 
8247 	rp.instance = cp->instance;
8248 	rp.tx_power = adv->tx_power;
8249 
8250 	/* While we're at it, inform userspace of the available space for this
8251 	 * advertisement, given the flags that will be used.
8252 	 */
8253 	flags = __le32_to_cpu(cp->flags);
8254 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8255 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8256 
8257 	if (err) {
8258 		/* If this advertisement was previously advertising and we
8259 		 * failed to update it, we signal that it has been removed and
8260 		 * delete its structure
8261 		 */
8262 		if (!adv->pending)
8263 			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8264 
8265 		hci_remove_adv_instance(hdev, cp->instance);
8266 
8267 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8268 				mgmt_status(err));
8269 	} else {
8270 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8271 				  mgmt_status(err), &rp, sizeof(rp));
8272 	}
8273 
8274 unlock:
8275 	if (cmd)
8276 		mgmt_pending_free(cmd);
8277 
8278 	hci_dev_unlock(hdev);
8279 }
8280 
add_ext_adv_params_sync(struct hci_dev * hdev,void * data)8281 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8282 {
8283 	struct mgmt_pending_cmd *cmd = data;
8284 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8285 
8286 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8287 }
8288 
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler: create a new advertising
 * instance with the requested parameters but no data yet; the data is
 * supplied by a subsequent MGMT_OP_ADD_EXT_ADV_DATA call.
 *
 * Fix: the data_len sanity check below used to respond with the
 * MGMT_OP_ADD_ADVERTISING opcode (copy-paste error), which breaks
 * userspace response matching; it now uses MGMT_OP_ADD_EXT_ADV_PARAMS
 * like every other error path in this function.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   0, NULL, 0, NULL, timeout, duration,
				   tx_power, min_interval, max_interval);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: nothing to program yet, answer with
		 * defaults right away.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8403 
add_ext_adv_data_complete(struct hci_dev * hdev,void * data,int err)8404 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8405 {
8406 	struct mgmt_pending_cmd *cmd = data;
8407 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8408 	struct mgmt_rp_add_advertising rp;
8409 
8410 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8411 
8412 	memset(&rp, 0, sizeof(rp));
8413 
8414 	rp.instance = cp->instance;
8415 
8416 	if (err)
8417 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8418 				mgmt_status(err));
8419 	else
8420 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8421 				  mgmt_status(err), &rp, sizeof(rp));
8422 
8423 	mgmt_pending_free(cmd);
8424 }
8425 
add_ext_adv_data_sync(struct hci_dev * hdev,void * data)8426 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8427 {
8428 	struct mgmt_pending_cmd *cmd = data;
8429 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8430 	int err;
8431 
8432 	if (ext_adv_capable(hdev)) {
8433 		err = hci_update_adv_data_sync(hdev, cp->instance);
8434 		if (err)
8435 			return err;
8436 
8437 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8438 		if (err)
8439 			return err;
8440 
8441 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8442 	}
8443 
8444 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8445 }
8446 
/* MGMT_OP_ADD_EXT_ADV_DATA handler: attach advertising and scan
 * response data to an instance previously created via
 * MGMT_OP_ADD_EXT_ADV_PARAMS, and schedule it for transmission.
 *
 * On any validation or queueing failure the instance is removed again
 * (clear_new_instance label), since it is useless without its data.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must already exist (created by the params call) */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data: both the adv data and scan response TLVs */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, the completion
	 * handler will remove the instance again.
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8565 
remove_advertising_complete(struct hci_dev * hdev,void * data,int err)8566 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8567 					int err)
8568 {
8569 	struct mgmt_pending_cmd *cmd = data;
8570 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8571 	struct mgmt_rp_remove_advertising rp;
8572 
8573 	bt_dev_dbg(hdev, "err %d", err);
8574 
8575 	memset(&rp, 0, sizeof(rp));
8576 	rp.instance = cp->instance;
8577 
8578 	if (err)
8579 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8580 				mgmt_status(err));
8581 	else
8582 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8583 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8584 
8585 	mgmt_pending_free(cmd);
8586 }
8587 
remove_advertising_sync(struct hci_dev * hdev,void * data)8588 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8589 {
8590 	struct mgmt_pending_cmd *cmd = data;
8591 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8592 	int err;
8593 
8594 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8595 	if (err)
8596 		return err;
8597 
8598 	if (list_empty(&hdev->adv_instances))
8599 		err = hci_disable_advertising_sync(hdev);
8600 
8601 	return err;
8602 }
8603 
/* MGMT_OP_REMOVE_ADVERTISING handler: remove a single advertising
 * instance (cp->instance != 0) or all instances (cp->instance == 0).
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance number must refer to an existing instance */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Use the shared adv_busy() helper for consistency with the other
	 * advertising commands instead of open-coding the SET_LE lookup.
	 */
	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8651 
/* MGMT_OP_GET_ADV_SIZE_INFO handler: report the maximum advertising
 * and scan response data lengths for the given instance and flags.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Size information is only meaningful on an LE capable controller */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	/* Instance numbers are 1-based and bounded by the controller */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	supported_flags = get_supported_adv_flags(hdev);

	/* Reject any flag outside the supported subset */
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
8687 
/* Dispatch table for management commands, indexed by opcode (entry 0
 * corresponds to the unused opcode 0x0000). Each entry provides the
 * handler and its expected parameter size; HCI_MGMT_VAR_LEN marks the
 * size as a minimum for variable-length commands. The remaining
 * HCI_MGMT_* flags qualify how the command may be invoked (e.g.
 * without a controller index, from untrusted sockets, or on an
 * unconfigured controller).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8815 
mgmt_index_added(struct hci_dev * hdev)8816 void mgmt_index_added(struct hci_dev *hdev)
8817 {
8818 	struct mgmt_ev_ext_index ev;
8819 
8820 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8821 		return;
8822 
8823 	switch (hdev->dev_type) {
8824 	case HCI_PRIMARY:
8825 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8826 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8827 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8828 			ev.type = 0x01;
8829 		} else {
8830 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8831 					 HCI_MGMT_INDEX_EVENTS);
8832 			ev.type = 0x00;
8833 		}
8834 		break;
8835 	case HCI_AMP:
8836 		ev.type = 0x02;
8837 		break;
8838 	default:
8839 		return;
8840 	}
8841 
8842 	ev.bus = hdev->bus;
8843 
8844 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8845 			 HCI_MGMT_EXT_INDEX_EVENTS);
8846 }
8847 
mgmt_index_removed(struct hci_dev * hdev)8848 void mgmt_index_removed(struct hci_dev *hdev)
8849 {
8850 	struct mgmt_ev_ext_index ev;
8851 	u8 status = MGMT_STATUS_INVALID_INDEX;
8852 
8853 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8854 		return;
8855 
8856 	switch (hdev->dev_type) {
8857 	case HCI_PRIMARY:
8858 		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8859 
8860 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8861 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8862 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8863 			ev.type = 0x01;
8864 		} else {
8865 			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8866 					 HCI_MGMT_INDEX_EVENTS);
8867 			ev.type = 0x00;
8868 		}
8869 		break;
8870 	case HCI_AMP:
8871 		ev.type = 0x02;
8872 		break;
8873 	default:
8874 		return;
8875 	}
8876 
8877 	ev.bus = hdev->bus;
8878 
8879 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8880 			 HCI_MGMT_EXT_INDEX_EVENTS);
8881 }
8882 
mgmt_power_on(struct hci_dev * hdev,int err)8883 void mgmt_power_on(struct hci_dev *hdev, int err)
8884 {
8885 	struct cmd_lookup match = { NULL, hdev };
8886 
8887 	bt_dev_dbg(hdev, "err %d", err);
8888 
8889 	hci_dev_lock(hdev);
8890 
8891 	if (!err) {
8892 		restart_le_actions(hdev);
8893 		hci_update_passive_scan(hdev);
8894 	}
8895 
8896 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8897 
8898 	new_settings(hdev, match.sk);
8899 
8900 	if (match.sk)
8901 		sock_put(match.sk);
8902 
8903 	hci_dev_unlock(hdev);
8904 }
8905 
__mgmt_power_off(struct hci_dev * hdev)8906 void __mgmt_power_off(struct hci_dev *hdev)
8907 {
8908 	struct cmd_lookup match = { NULL, hdev };
8909 	u8 status, zero_cod[] = { 0, 0, 0 };
8910 
8911 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8912 
8913 	/* If the power off is because of hdev unregistration let
8914 	 * use the appropriate INVALID_INDEX status. Otherwise use
8915 	 * NOT_POWERED. We cover both scenarios here since later in
8916 	 * mgmt_index_removed() any hci_conn callbacks will have already
8917 	 * been triggered, potentially causing misleading DISCONNECTED
8918 	 * status responses.
8919 	 */
8920 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8921 		status = MGMT_STATUS_INVALID_INDEX;
8922 	else
8923 		status = MGMT_STATUS_NOT_POWERED;
8924 
8925 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8926 
8927 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8928 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8929 				   zero_cod, sizeof(zero_cod),
8930 				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8931 		ext_info_changed(hdev, NULL);
8932 	}
8933 
8934 	new_settings(hdev, match.sk);
8935 
8936 	if (match.sk)
8937 		sock_put(match.sk);
8938 }
8939 
mgmt_set_powered_failed(struct hci_dev * hdev,int err)8940 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8941 {
8942 	struct mgmt_pending_cmd *cmd;
8943 	u8 status;
8944 
8945 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8946 	if (!cmd)
8947 		return;
8948 
8949 	if (err == -ERFKILL)
8950 		status = MGMT_STATUS_RFKILLED;
8951 	else
8952 		status = MGMT_STATUS_FAILED;
8953 
8954 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8955 
8956 	mgmt_pending_remove(cmd);
8957 }
8958 
/* Emit a New Link Key event for a freshly created BR/EDR link key;
 * store_hint tells userspace whether to persist it.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev = {
		.store_hint = persistent,
		.key.addr.type = BDADDR_BREDR,
		.key.type = key->type,
		.key.pin_len = key->pin_len,
	};

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
8975 
mgmt_ltk_type(struct smp_ltk * ltk)8976 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8977 {
8978 	switch (ltk->type) {
8979 	case SMP_LTK:
8980 	case SMP_LTK_RESPONDER:
8981 		if (ltk->authenticated)
8982 			return MGMT_LTK_AUTHENTICATED;
8983 		return MGMT_LTK_UNAUTHENTICATED;
8984 	case SMP_LTK_P256:
8985 		if (ltk->authenticated)
8986 			return MGMT_LTK_P256_AUTH;
8987 		return MGMT_LTK_P256_UNAUTH;
8988 	case SMP_LTK_P256_DEBUG:
8989 		return MGMT_LTK_P256_DEBUG;
8990 	}
8991 
8992 	return MGMT_LTK_UNAUTHENTICATED;
8993 }
8994 
/* Emit a New Long Term Key event so userspace can decide whether to
 * persist the key. Keys for non-identity random addresses are reported
 * with store_hint 0 since the address will change.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK (as opposed to SMP_LTK_RESPONDER) marks a key created
	 * while we were the pairing initiator.
	 */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9037 
/* Emit a New IRK event carrying the remote's identity resolving key
 * together with the current RPA it was resolved from.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
	ev.store_hint = persistent;

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
9053 
/* Emit a New CSRK event so userspace can decide whether to persist the
 * signature resolving key.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;
	bool identity_addr;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	identity_addr = !(csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
			  (csrk->bdaddr.b[5] & 0xc0) != 0xc0);
	ev.store_hint = identity_addr ? persistent : 0x00;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9083 
/* Emit a New Connection Parameter event for an identity address.
 * Non-identity (resolvable/non-resolvable random) addresses are ignored
 * since their parameters cannot be meaningfully persisted.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;

	/* All interval/timing values go out on the wire little-endian */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9104 
/* Send the Device Connected mgmt event for a new connection.
 *
 * For LE connections the advertising data that triggered the connection
 * is forwarded as the event's EIR payload. For BR/EDR, the remote name
 * (if known) and a non-zero Class of Device are appended instead.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	/* allocate buff for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	/* mgmt_alloc_skb() can fail; without this check skb_put() below
	 * would dereference a NULL pointer (mgmt_device_found() performs
	 * the same check after its allocation).
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		/* Only append the Class of Device when it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}
9151 
disconnect_rsp(struct mgmt_pending_cmd * cmd,void * data)9152 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9153 {
9154 	struct sock **sk = data;
9155 
9156 	cmd->cmd_complete(cmd, 0);
9157 
9158 	*sk = cmd->sk;
9159 	sock_hold(*sk);
9160 
9161 	mgmt_pending_remove(cmd);
9162 }
9163 
/* mgmt_pending_foreach() callback: emit Device Unpaired for the address
 * in the pending Unpair Device command, then complete and remove it.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9174 
mgmt_powering_down(struct hci_dev * hdev)9175 bool mgmt_powering_down(struct hci_dev *hdev)
9176 {
9177 	struct mgmt_pending_cmd *cmd;
9178 	struct mgmt_mode *cp;
9179 
9180 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9181 	if (!cmd)
9182 		return false;
9183 
9184 	cp = cmd->param;
9185 	if (!cp->val)
9186 		return true;
9187 
9188 	return false;
9189 }
9190 
/* Handle a device disconnection: flush any pending power-off once the
 * last connection drops, then (for ACL/LE links that were reported as
 * connected to mgmt) send Device Disconnected and complete any pending
 * Disconnect/Unpair Device commands.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete a pending Disconnect command first so its socket can
	 * be excluded from the broadcast below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9230 
/* A disconnect attempt failed: complete pending Unpair Device commands
 * and, if a pending Disconnect command targeted this address, complete
 * it with the translated HCI status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command if it was aimed at this address */
	if (bacmp(bdaddr, &cp->addr.bdaddr) || cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9256 
/* A connection attempt failed: flush a pending power-off if this was
 * the last connection, then broadcast Connect Failed with the
 * translated HCI status.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9276 
/* Forward a controller PIN code request to userspace. @secure indicates
 * that a 16-digit PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	ev.secure = secure;
	ev.addr.type = BDADDR_BREDR;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
9287 
/* Complete a pending PIN Code Reply command with the translated HCI
 * status, if one is pending.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9300 
/* Complete a pending PIN Code Negative Reply command with the
 * translated HCI status, if one is pending.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9313 
/* Ask userspace to confirm a pairing numeric comparison value.
 * @confirm_hint distinguishes "just-works" auto-confirmation from a
 * real user comparison. Returns the result of mgmt_event().
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9330 
/* Ask userspace to supply a passkey for the remote device. Returns the
 * result of mgmt_event().
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9344 
/* Shared completion helper for the user confirm/passkey (neg) reply
 * commands: complete the pending command identified by @opcode with the
 * translated HCI status. Returns -ENOENT if none is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
	return 0;
}
9360 
/* Completion hook for User Confirm Reply. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9367 
/* Completion hook for User Confirm Negative Reply. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9375 
/* Completion hook for User Passkey Reply. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9382 
/* Completion hook for User Passkey Negative Reply. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9390 
/* Notify userspace of the passkey shown on this side and whether a
 * digit has just been entered on the remote. Returns the result of
 * mgmt_event().
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
9406 
/* Report an authentication failure for @conn: broadcast Auth Failed
 * (excluding the socket of a pending pairing command, if any) and then
 * complete that pending command with the same status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* The event must go out before the command completes so the
	 * initiating socket is skipped via cmd->sk.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9427 
/* Completion handler for the HCI authentication-enable change backing
 * Set Link Security. On error, fail all pending commands; on success,
 * sync the HCI_LINK_SECURITY flag with the controller state and emit
 * New Settings only when the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* test-and-set/clear return the previous flag value, so
	 * "changed" is true only on an actual transition.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9454 
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)9455 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9456 {
9457 	struct cmd_lookup *match = data;
9458 
9459 	if (match->sk == NULL) {
9460 		match->sk = cmd->sk;
9461 		sock_hold(match->sk);
9462 	}
9463 }
9464 
/* Completion handler for a Class of Device update. Collect the socket
 * of any command that triggered the change (Set Dev Class, Add/Remove
 * UUID) so it is excluded from the Class Of Dev Changed broadcast.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9483 
/* Completion handler for a local name change. Broadcasts Local Name
 * Changed unless the change originated from the power-on sequence
 * itself (no Set Local Name pending but Set Powered pending).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No userspace-initiated rename: store the name that the
		 * controller reported.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9511 
has_uuid(u8 * uuid,u16 uuid_count,u8 (* uuids)[16])9512 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9513 {
9514 	int i;
9515 
9516 	for (i = 0; i < uuid_count; i++) {
9517 		if (!memcmp(uuid, uuids[i], 16))
9518 			return true;
9519 	}
9520 
9521 	return false;
9522 }
9523 
/* Walk the EIR/advertising data in @eir and return true if any 16-,
 * 32- or 128-bit service UUID it advertises matches an entry in
 * @uuids. 16/32-bit UUIDs are expanded into the Bluetooth base UUID
 * before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field rather than reading past the
		 * end of the buffer.
		 */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* UUIDs are little-endian; expand bytes 12/13 of
			 * the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + field_len bytes) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9578 
/* Schedule an LE scan restart so duplicate filtering re-reports known
 * devices with fresh RSSI values.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when the current scan window would end before
	 * the delayed restart could take effect anyway.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
9593 
/* Apply the Start Service Discovery filters (RSSI threshold and UUID
 * list) to a discovered device. Returns true if the result should be
 * reported to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
9638 
/* Notify userspace that a device tracked by an Advertisement Monitor
 * has gone out of range / stopped advertising.
 */
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;
	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}
9651 
/* Build and send an ADV_MONITOR_DEVICE_FOUND event from an existing
 * DEVICE_FOUND event skb by prefixing the matched monitor handle.
 * @skb is only read here; the caller retains ownership of it.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* New event size = DEVICE_FOUND payload plus the extra fields of
	 * the monitor event (i.e. the monitor handle).
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
9681 
/* Route a DEVICE_FOUND skb to the right event(s) depending on whether
 * the report stems from discovery/passive scanning, an Advertisement
 * Monitor match, or both. Consumes @skb: it is either forwarded via
 * mgmt_event_skb() or freed here.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below: set again if any monitored device still
	 * awaits its one-time notification.
	 */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
9745 
/* Report a discovered device to userspace as DEVICE_FOUND (and/or
 * ADV_MONITOR_DEVICE_FOUND), after applying discovery, service and
 * limited-discoverable filtering.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Append the Class of Device as a synthesized EIR field when the
	 * EIR data doesn't already carry one.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Takes ownership of skb (forwards or frees it) */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
9831 
/* Report the result of a remote name request as a DEVICE_FOUND event
 * carrying only the name as EIR data. A NULL @name marks a failed
 * name request via MGMT_DEV_FOUND_NAME_REQUEST_FAILED.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));

	/* mgmt_alloc_skb() can fail; without this check skb_put() below
	 * would dereference a NULL pointer (mgmt_device_found() performs
	 * the same check after its allocation).
	 */
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}
9858 
/* Broadcast a Discovering event reflecting the current discovery type
 * and whether discovery just started (1) or stopped (0).
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.discovering = discovering;
	ev.type = hdev->discovery.type;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
9871 
/* Broadcast a Controller Suspend event with the given suspend state. */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
9879 
/* Broadcast a Controller Resume event. When a remote device caused the
 * wakeup, @bdaddr/@addr_type identify it; otherwise the address field
 * is zeroed.
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	memset(&ev.addr, 0, sizeof(ev.addr));
	ev.wake_reason = reason;

	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
9895 
/* Handler table registered for the HCI control channel; routes mgmt
 * commands from userspace to the handlers in this file.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9902 
/* Register the mgmt control channel. Returns 0 on success or a
 * negative error from hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9907 
/* Unregister the mgmt control channel on module teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9912