1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <linux/module.h>
28 
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <net/sock.h>
39 
40 #include <linux/uaccess.h>
41 #include <asm/unaligned.h>
42 
43 #include <net/bluetooth/bluetooth.h>
44 #include <net/bluetooth/hci_core.h>
45 
46 /* Handle HCI Event packets */
47 
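/* Command Complete handlers (hci_cc_*) below receive skb->data pointing
 * at the command's return parameters; for most commands the first byte
 * is the status (0x00 on success). Command Status handlers (hci_cs_*)
 * only get the status byte. A minimal sketch of the common pattern,
 * assuming a single-byte return parameter:
 *
 *	__u8 status = *((__u8 *) skb->data);
 *
 *	if (status)
 *		return;
 */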
48 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
49 {
50 	__u8 status = *((__u8 *) skb->data);
51 
52 	BT_DBG("%s status 0x%x", hdev->name, status);
53 
54 	if (status) {
55 		hci_dev_lock(hdev);
56 		mgmt_stop_discovery_failed(hdev, status);
57 		hci_dev_unlock(hdev);
58 		return;
59 	}
60 
61 	clear_bit(HCI_INQUIRY, &hdev->flags);
62 
63 	hci_dev_lock(hdev);
64 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
65 	hci_dev_unlock(hdev);
66 
67 	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
68 
69 	hci_conn_check_pending(hdev);
70 }
71 
72 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73 {
74 	__u8 status = *((__u8 *) skb->data);
75 
76 	BT_DBG("%s status 0x%x", hdev->name, status);
77 
78 	if (status)
79 		return;
80 
81 	hci_conn_check_pending(hdev);
82 }
83 
84 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
85 {
86 	BT_DBG("%s", hdev->name);
87 }
88 
89 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
90 {
91 	struct hci_rp_role_discovery *rp = (void *) skb->data;
92 	struct hci_conn *conn;
93 
94 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
95 
96 	if (rp->status)
97 		return;
98 
99 	hci_dev_lock(hdev);
100 
101 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
102 	if (conn) {
103 		if (rp->role)
104 			conn->link_mode &= ~HCI_LM_MASTER;
105 		else
106 			conn->link_mode |= HCI_LM_MASTER;
107 	}
108 
109 	hci_dev_unlock(hdev);
110 }
111 
112 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
113 {
114 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
115 	struct hci_conn *conn;
116 
117 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
118 
119 	if (rp->status)
120 		return;
121 
122 	hci_dev_lock(hdev);
123 
124 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
125 	if (conn)
126 		conn->link_policy = __le16_to_cpu(rp->policy);
127 
128 	hci_dev_unlock(hdev);
129 }
130 
131 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
132 {
133 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
134 	struct hci_conn *conn;
135 	void *sent;
136 
137 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
138 
139 	if (rp->status)
140 		return;
141 
142 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
143 	if (!sent)
144 		return;
145 
146 	hci_dev_lock(hdev);
147 
148 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
149 	if (conn)
150 		conn->link_policy = get_unaligned_le16(sent + 2);
151 
152 	hci_dev_unlock(hdev);
153 }
154 
155 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
156 {
157 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
158 
159 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
160 
161 	if (rp->status)
162 		return;
163 
164 	hdev->link_policy = __le16_to_cpu(rp->policy);
165 }
166 
167 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
168 {
169 	__u8 status = *((__u8 *) skb->data);
170 	void *sent;
171 
172 	BT_DBG("%s status 0x%x", hdev->name, status);
173 
174 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
175 	if (!sent)
176 		return;
177 
178 	if (!status)
179 		hdev->link_policy = get_unaligned_le16(sent);
180 
181 	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
182 }
183 
184 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
185 {
186 	__u8 status = *((__u8 *) skb->data);
187 
188 	BT_DBG("%s status 0x%x", hdev->name, status);
189 
190 	clear_bit(HCI_RESET, &hdev->flags);
191 
192 	hci_req_complete(hdev, HCI_OP_RESET, status);
193 
194 	/* Reset all non-persistent flags */
195 	hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
196 
197 	hdev->discovery.state = DISCOVERY_STOPPED;
198 }
199 
200 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
201 {
202 	__u8 status = *((__u8 *) skb->data);
203 	void *sent;
204 
205 	BT_DBG("%s status 0x%x", hdev->name, status);
206 
207 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
208 	if (!sent)
209 		return;
210 
211 	hci_dev_lock(hdev);
212 
213 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
214 		mgmt_set_local_name_complete(hdev, sent, status);
215 	else if (!status)
216 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
217 
218 	hci_dev_unlock(hdev);
219 
220 	hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
221 }
222 
223 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
224 {
225 	struct hci_rp_read_local_name *rp = (void *) skb->data;
226 
227 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
228 
229 	if (rp->status)
230 		return;
231 
232 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
233 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
234 }
235 
236 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
237 {
238 	__u8 status = *((__u8 *) skb->data);
239 	void *sent;
240 
241 	BT_DBG("%s status 0x%x", hdev->name, status);
242 
243 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
244 	if (!sent)
245 		return;
246 
247 	if (!status) {
248 		__u8 param = *((__u8 *) sent);
249 
250 		if (param == AUTH_ENABLED)
251 			set_bit(HCI_AUTH, &hdev->flags);
252 		else
253 			clear_bit(HCI_AUTH, &hdev->flags);
254 	}
255 
256 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
257 		mgmt_auth_enable_complete(hdev, status);
258 
259 	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
260 }
261 
262 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
263 {
264 	__u8 status = *((__u8 *) skb->data);
265 	void *sent;
266 
267 	BT_DBG("%s status 0x%x", hdev->name, status);
268 
269 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
270 	if (!sent)
271 		return;
272 
273 	if (!status) {
274 		__u8 param = *((__u8 *) sent);
275 
276 		if (param)
277 			set_bit(HCI_ENCRYPT, &hdev->flags);
278 		else
279 			clear_bit(HCI_ENCRYPT, &hdev->flags);
280 	}
281 
282 	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
283 }
284 
285 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
286 {
287 	__u8 param, status = *((__u8 *) skb->data);
288 	int old_pscan, old_iscan;
289 	void *sent;
290 
291 	BT_DBG("%s status 0x%x", hdev->name, status);
292 
293 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
294 	if (!sent)
295 		return;
296 
297 	param = *((__u8 *) sent);
298 
299 	hci_dev_lock(hdev);
300 
301 	if (status != 0) {
302 		mgmt_write_scan_failed(hdev, param, status);
303 		hdev->discov_timeout = 0;
304 		goto done;
305 	}
306 
307 	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
308 	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
309 
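	/* The written Scan_Enable bits map onto the mgmt settings: SCAN_INQUIRY
	 * (inquiry scan) corresponds to "discoverable" and SCAN_PAGE (page
	 * scan) to "connectable". Only state transitions are reported to mgmt,
	 * and enabling inquiry scan arms the discoverable timeout if one is
	 * configured. */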
310 	if (param & SCAN_INQUIRY) {
311 		set_bit(HCI_ISCAN, &hdev->flags);
312 		if (!old_iscan)
313 			mgmt_discoverable(hdev, 1);
314 		if (hdev->discov_timeout > 0) {
315 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
316 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
317 									to);
318 		}
319 	} else if (old_iscan)
320 		mgmt_discoverable(hdev, 0);
321 
322 	if (param & SCAN_PAGE) {
323 		set_bit(HCI_PSCAN, &hdev->flags);
324 		if (!old_pscan)
325 			mgmt_connectable(hdev, 1);
326 	} else if (old_pscan)
327 		mgmt_connectable(hdev, 0);
328 
329 done:
330 	hci_dev_unlock(hdev);
331 	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
332 }
333 
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
335 {
336 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
337 
338 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
339 
340 	if (rp->status)
341 		return;
342 
343 	memcpy(hdev->dev_class, rp->dev_class, 3);
344 
345 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 		hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
347 }
348 
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
350 {
351 	__u8 status = *((__u8 *) skb->data);
352 	void *sent;
353 
354 	BT_DBG("%s status 0x%x", hdev->name, status);
355 
356 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
357 	if (!sent)
358 		return;
359 
360 	hci_dev_lock(hdev);
361 
362 	if (status == 0)
363 		memcpy(hdev->dev_class, sent, 3);
364 
365 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 		mgmt_set_class_of_dev_complete(hdev, sent, status);
367 
368 	hci_dev_unlock(hdev);
369 }
370 
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
372 {
373 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
374 	__u16 setting;
375 
376 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
377 
378 	if (rp->status)
379 		return;
380 
381 	setting = __le16_to_cpu(rp->voice_setting);
382 
383 	if (hdev->voice_setting == setting)
384 		return;
385 
386 	hdev->voice_setting = setting;
387 
388 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
389 
390 	if (hdev->notify)
391 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
392 }
393 
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
395 {
396 	__u8 status = *((__u8 *) skb->data);
397 	__u16 setting;
398 	void *sent;
399 
400 	BT_DBG("%s status 0x%x", hdev->name, status);
401 
402 	if (status)
403 		return;
404 
405 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
406 	if (!sent)
407 		return;
408 
409 	setting = get_unaligned_le16(sent);
410 
411 	if (hdev->voice_setting == setting)
412 		return;
413 
414 	hdev->voice_setting = setting;
415 
416 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
417 
418 	if (hdev->notify)
419 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
420 }
421 
422 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
423 {
424 	__u8 status = *((__u8 *) skb->data);
425 
426 	BT_DBG("%s status 0x%x", hdev->name, status);
427 
428 	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
429 }
430 
431 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
432 {
433 	__u8 status = *((__u8 *) skb->data);
434 	void *sent;
435 
436 	BT_DBG("%s status 0x%x", hdev->name, status);
437 
438 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
439 	if (!sent)
440 		return;
441 
442 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
443 		mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
444 	else if (!status) {
445 		if (*((u8 *) sent))
446 			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
447 		else
448 			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
449 	}
450 }
451 
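/* Pick the Inquiry_Mode value to write: 0x02 (inquiry result with RSSI or
 * extended inquiry result) when extended inquiry is supported, 0x01 (with
 * RSSI) when LMP advertises it, 0x00 (standard) otherwise. The explicit
 * manufacturer/revision checks appear to cover controllers that handle
 * RSSI inquiry results without advertising the feature. */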
452 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
453 {
454 	if (hdev->features[6] & LMP_EXT_INQ)
455 		return 2;
456 
457 	if (hdev->features[3] & LMP_RSSI_INQ)
458 		return 1;
459 
460 	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
461 						hdev->lmp_subver == 0x0757)
462 		return 1;
463 
464 	if (hdev->manufacturer == 15) {
465 		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
466 			return 1;
467 		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
468 			return 1;
469 		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
470 			return 1;
471 	}
472 
473 	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
474 						hdev->lmp_subver == 0x1805)
475 		return 1;
476 
477 	return 0;
478 }
479 
480 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
481 {
482 	u8 mode;
483 
484 	mode = hci_get_inquiry_mode(hdev);
485 
486 	hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
487 }
488 
489 static void hci_setup_event_mask(struct hci_dev *hdev)
490 {
491 	/* The second byte is 0xff instead of 0x9f (two reserved bits
492 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
493 	 * command otherwise */
494 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
495 
496 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
497 	 * any event mask for pre-1.2 devices */
498 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
499 		return;
500 
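	/* The Set Event Mask parameter is a 64-bit little-endian bit mask, so
	 * events[n] bit b corresponds to event-mask bit (n * 8 + b); e.g.
	 * events[4] |= 0x02 sets bit 33 (Inquiry Result with RSSI). */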
501 	events[4] |= 0x01; /* Flow Specification Complete */
502 	events[4] |= 0x02; /* Inquiry Result with RSSI */
503 	events[4] |= 0x04; /* Read Remote Extended Features Complete */
504 	events[5] |= 0x08; /* Synchronous Connection Complete */
505 	events[5] |= 0x10; /* Synchronous Connection Changed */
506 
507 	if (hdev->features[3] & LMP_RSSI_INQ)
508 		events[4] |= 0x04; /* Inquiry Result with RSSI */
509 
510 	if (hdev->features[5] & LMP_SNIFF_SUBR)
511 		events[5] |= 0x20; /* Sniff Subrating */
512 
513 	if (hdev->features[5] & LMP_PAUSE_ENC)
514 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
515 
516 	if (hdev->features[6] & LMP_EXT_INQ)
517 		events[5] |= 0x40; /* Extended Inquiry Result */
518 
519 	if (hdev->features[6] & LMP_NO_FLUSH)
520 		events[7] |= 0x01; /* Enhanced Flush Complete */
521 
522 	if (hdev->features[7] & LMP_LSTO)
523 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
524 
525 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
526 		events[6] |= 0x01;	/* IO Capability Request */
527 		events[6] |= 0x02;	/* IO Capability Response */
528 		events[6] |= 0x04;	/* User Confirmation Request */
529 		events[6] |= 0x08;	/* User Passkey Request */
530 		events[6] |= 0x10;	/* Remote OOB Data Request */
531 		events[6] |= 0x20;	/* Simple Pairing Complete */
532 		events[7] |= 0x04;	/* User Passkey Notification */
533 		events[7] |= 0x08;	/* Keypress Notification */
534 		events[7] |= 0x10;	/* Remote Host Supported
535 					 * Features Notification */
536 	}
537 
538 	if (hdev->features[4] & LMP_LE)
539 		events[7] |= 0x20;	/* LE Meta-Event */
540 
541 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
542 }
543 
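/* One-time BR/EDR controller setup, run from the Read Local Version
 * handler while HCI_INIT is set: program the event mask, read the
 * supported commands, enable SSP (or clear the EIR data when SSP is
 * off), select the inquiry mode and read the extended features. */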
544 static void hci_setup(struct hci_dev *hdev)
545 {
546 	if (hdev->dev_type != HCI_BREDR)
547 		return;
548 
549 	hci_setup_event_mask(hdev);
550 
551 	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
552 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
553 
554 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
555 		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
556 			u8 mode = 0x01;
557 			hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
558 				     sizeof(mode), &mode);
559 		} else {
560 			struct hci_cp_write_eir cp;
561 
562 			memset(hdev->eir, 0, sizeof(hdev->eir));
563 			memset(&cp, 0, sizeof(cp));
564 
565 			hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
566 		}
567 	}
568 
569 	if (hdev->features[3] & LMP_RSSI_INQ)
570 		hci_setup_inquiry_mode(hdev);
571 
572 	if (hdev->features[7] & LMP_INQ_TX_PWR)
573 		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
574 
575 	if (hdev->features[7] & LMP_EXTFEATURES) {
576 		struct hci_cp_read_local_ext_features cp;
577 
578 		cp.page = 0x01;
579 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
580 			     &cp);
581 	}
582 
583 	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
584 		u8 enable = 1;
585 		hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
586 			     &enable);
587 	}
588 }
589 
590 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
591 {
592 	struct hci_rp_read_local_version *rp = (void *) skb->data;
593 
594 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
595 
596 	if (rp->status)
597 		goto done;
598 
599 	hdev->hci_ver = rp->hci_ver;
600 	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
601 	hdev->lmp_ver = rp->lmp_ver;
602 	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
603 	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
604 
605 	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
606 					hdev->manufacturer,
607 					hdev->hci_ver, hdev->hci_rev);
608 
609 	if (test_bit(HCI_INIT, &hdev->flags))
610 		hci_setup(hdev);
611 
612 done:
613 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
614 }
615 
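/* Build the default link policy from the locally supported LMP features
 * (role switch, hold, sniff, park) and write it with Write Default Link
 * Policy Settings. */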
616 static void hci_setup_link_policy(struct hci_dev *hdev)
617 {
618 	u16 link_policy = 0;
619 
620 	if (hdev->features[0] & LMP_RSWITCH)
621 		link_policy |= HCI_LP_RSWITCH;
622 	if (hdev->features[0] & LMP_HOLD)
623 		link_policy |= HCI_LP_HOLD;
624 	if (hdev->features[0] & LMP_SNIFF)
625 		link_policy |= HCI_LP_SNIFF;
626 	if (hdev->features[1] & LMP_PARK)
627 		link_policy |= HCI_LP_PARK;
628 
629 	link_policy = cpu_to_le16(link_policy);
630 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
631 		     &link_policy);
632 }
633 
634 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
635 {
636 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
637 
638 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
639 
640 	if (rp->status)
641 		goto done;
642 
643 	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
644 
645 	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
646 		hci_setup_link_policy(hdev);
647 
648 done:
649 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
650 }
651 
652 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
653 {
654 	struct hci_rp_read_local_features *rp = (void *) skb->data;
655 
656 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
657 
658 	if (rp->status)
659 		return;
660 
661 	memcpy(hdev->features, rp->features, 8);
662 
663 	/* Adjust default settings according to features
664 	 * supported by the device. */
665 
666 	if (hdev->features[0] & LMP_3SLOT)
667 		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
668 
669 	if (hdev->features[0] & LMP_5SLOT)
670 		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
671 
672 	if (hdev->features[1] & LMP_HV2) {
673 		hdev->pkt_type  |= (HCI_HV2);
674 		hdev->esco_type |= (ESCO_HV2);
675 	}
676 
677 	if (hdev->features[1] & LMP_HV3) {
678 		hdev->pkt_type  |= (HCI_HV3);
679 		hdev->esco_type |= (ESCO_HV3);
680 	}
681 
682 	if (hdev->features[3] & LMP_ESCO)
683 		hdev->esco_type |= (ESCO_EV3);
684 
685 	if (hdev->features[4] & LMP_EV4)
686 		hdev->esco_type |= (ESCO_EV4);
687 
688 	if (hdev->features[4] & LMP_EV5)
689 		hdev->esco_type |= (ESCO_EV5);
690 
691 	if (hdev->features[5] & LMP_EDR_ESCO_2M)
692 		hdev->esco_type |= (ESCO_2EV3);
693 
694 	if (hdev->features[5] & LMP_EDR_ESCO_3M)
695 		hdev->esco_type |= (ESCO_3EV3);
696 
697 	if (hdev->features[5] & LMP_EDR_3S_ESCO)
698 		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
699 
700 	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
701 					hdev->features[0], hdev->features[1],
702 					hdev->features[2], hdev->features[3],
703 					hdev->features[4], hdev->features[5],
704 					hdev->features[6], hdev->features[7]);
705 }
706 
707 static void hci_set_le_support(struct hci_dev *hdev)
708 {
709 	struct hci_cp_write_le_host_supported cp;
710 
711 	memset(&cp, 0, sizeof(cp));
712 
713 	if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
714 		cp.le = 1;
715 		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
716 	}
717 
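	/* Only send Write LE Host Supported when the desired LE host flag
	 * differs from the current host features, avoiding a redundant
	 * command. */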
718 	if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
719 		hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
720 			     &cp);
721 }
722 
723 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
724 							struct sk_buff *skb)
725 {
726 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
727 
728 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
729 
730 	if (rp->status)
731 		goto done;
732 
733 	switch (rp->page) {
734 	case 0:
735 		memcpy(hdev->features, rp->features, 8);
736 		break;
737 	case 1:
738 		memcpy(hdev->host_features, rp->features, 8);
739 		break;
740 	}
741 
742 	if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
743 		hci_set_le_support(hdev);
744 
745 done:
746 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
747 }
748 
749 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
750 						struct sk_buff *skb)
751 {
752 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
753 
754 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
755 
756 	if (rp->status)
757 		return;
758 
759 	hdev->flow_ctl_mode = rp->mode;
760 
761 	hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
762 }
763 
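/* Cache the controller's buffer geometry: *_mtu is the maximum payload per
 * ACL/SCO packet and *_pkts is how many packets the controller can queue.
 * The *_cnt fields start at *_pkts and act as the host-side credit counters
 * for HCI flow control. Controllers flagged with
 * HCI_QUIRK_FIXUP_BUFFER_SIZE get fixed SCO defaults (64-byte MTU,
 * 8 packets) instead of the reported values. */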
764 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
765 {
766 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
767 
768 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
769 
770 	if (rp->status)
771 		return;
772 
773 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
774 	hdev->sco_mtu  = rp->sco_mtu;
775 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
776 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
777 
778 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
779 		hdev->sco_mtu  = 64;
780 		hdev->sco_pkts = 8;
781 	}
782 
783 	hdev->acl_cnt = hdev->acl_pkts;
784 	hdev->sco_cnt = hdev->sco_pkts;
785 
786 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
787 					hdev->acl_mtu, hdev->acl_pkts,
788 					hdev->sco_mtu, hdev->sco_pkts);
789 }
790 
791 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
792 {
793 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
794 
795 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
796 
797 	if (!rp->status)
798 		bacpy(&hdev->bdaddr, &rp->bdaddr);
799 
800 	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
801 }
802 
803 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
804 							struct sk_buff *skb)
805 {
806 	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
807 
808 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
809 
810 	if (rp->status)
811 		return;
812 
813 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
814 	hdev->block_len = __le16_to_cpu(rp->block_len);
815 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
816 
817 	hdev->block_cnt = hdev->num_blocks;
818 
819 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
820 					hdev->block_cnt, hdev->block_len);
821 
822 	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
823 }
824 
825 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
826 {
827 	__u8 status = *((__u8 *) skb->data);
828 
829 	BT_DBG("%s status 0x%x", hdev->name, status);
830 
831 	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
832 }
833 
834 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
835 		struct sk_buff *skb)
836 {
837 	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
838 
839 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
840 
841 	if (rp->status)
842 		return;
843 
844 	hdev->amp_status = rp->amp_status;
845 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
846 	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
847 	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
848 	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
849 	hdev->amp_type = rp->amp_type;
850 	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
851 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
852 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
853 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
854 
855 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
856 }
857 
858 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
859 							struct sk_buff *skb)
860 {
861 	__u8 status = *((__u8 *) skb->data);
862 
863 	BT_DBG("%s status 0x%x", hdev->name, status);
864 
865 	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
866 }
867 
868 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
869 {
870 	__u8 status = *((__u8 *) skb->data);
871 
872 	BT_DBG("%s status 0x%x", hdev->name, status);
873 
874 	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
875 }
876 
877 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
878 							struct sk_buff *skb)
879 {
880 	__u8 status = *((__u8 *) skb->data);
881 
882 	BT_DBG("%s status 0x%x", hdev->name, status);
883 
884 	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
885 }
886 
887 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
888 							struct sk_buff *skb)
889 {
890 	__u8 status = *((__u8 *) skb->data);
891 
892 	BT_DBG("%s status 0x%x", hdev->name, status);
893 
894 	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
895 }
896 
897 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
898 {
899 	__u8 status = *((__u8 *) skb->data);
900 
901 	BT_DBG("%s status 0x%x", hdev->name, status);
902 
903 	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
904 }
905 
906 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
907 {
908 	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
909 	struct hci_cp_pin_code_reply *cp;
910 	struct hci_conn *conn;
911 
912 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
913 
914 	hci_dev_lock(hdev);
915 
916 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
917 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
918 
919 	if (rp->status != 0)
920 		goto unlock;
921 
922 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
923 	if (!cp)
924 		goto unlock;
925 
926 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
927 	if (conn)
928 		conn->pin_length = cp->pin_len;
929 
930 unlock:
931 	hci_dev_unlock(hdev);
932 }
933 
934 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
935 {
936 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
937 
938 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
939 
940 	hci_dev_lock(hdev);
941 
942 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
943 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
944 								rp->status);
945 
946 	hci_dev_unlock(hdev);
947 }
948 
949 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
950 				       struct sk_buff *skb)
951 {
952 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
953 
954 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
955 
956 	if (rp->status)
957 		return;
958 
959 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
960 	hdev->le_pkts = rp->le_max_pkt;
961 
962 	hdev->le_cnt = hdev->le_pkts;
963 
964 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
965 
966 	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
967 }
968 
969 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
970 {
971 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
972 
973 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
974 
975 	hci_dev_lock(hdev);
976 
977 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
978 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
979 						 rp->status);
980 
981 	hci_dev_unlock(hdev);
982 }
983 
984 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
985 							struct sk_buff *skb)
986 {
987 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
988 
989 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
990 
991 	hci_dev_lock(hdev);
992 
993 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
994 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
995 						     ACL_LINK, 0, rp->status);
996 
997 	hci_dev_unlock(hdev);
998 }
999 
1000 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1001 {
1002 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1003 
1004 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1005 
1006 	hci_dev_lock(hdev);
1007 
1008 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1009 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1010 						 0, rp->status);
1011 
1012 	hci_dev_unlock(hdev);
1013 }
1014 
1015 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1016 							struct sk_buff *skb)
1017 {
1018 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1019 
1020 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1021 
1022 	hci_dev_lock(hdev);
1023 
1024 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1025 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1026 						     ACL_LINK, 0, rp->status);
1027 
1028 	hci_dev_unlock(hdev);
1029 }
1030 
1031 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1032 							struct sk_buff *skb)
1033 {
1034 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1035 
1036 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1037 
1038 	hci_dev_lock(hdev);
1039 	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1040 						rp->randomizer, rp->status);
1041 	hci_dev_unlock(hdev);
1042 }
1043 
1044 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1045 {
1046 	__u8 status = *((__u8 *) skb->data);
1047 
1048 	BT_DBG("%s status 0x%x", hdev->name, status);
1049 
1050 	hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1051 
1052 	if (status) {
1053 		hci_dev_lock(hdev);
1054 		mgmt_start_discovery_failed(hdev, status);
1055 		hci_dev_unlock(hdev);
1056 		return;
1057 	}
1058 }
1059 
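/* Track the LE scanning state: on enable, clear the cached advertising
 * entries and mark discovery as in progress; on disable, schedule the
 * advertising cache cleanup and either interleave with a BR/EDR inquiry
 * or stop discovery, depending on the discovery type. */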
1060 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1061 					struct sk_buff *skb)
1062 {
1063 	struct hci_cp_le_set_scan_enable *cp;
1064 	__u8 status = *((__u8 *) skb->data);
1065 
1066 	BT_DBG("%s status 0x%x", hdev->name, status);
1067 
1068 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1069 	if (!cp)
1070 		return;
1071 
1072 	switch (cp->enable) {
1073 	case LE_SCANNING_ENABLED:
1074 		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1075 
1076 		if (status) {
1077 			hci_dev_lock(hdev);
1078 			mgmt_start_discovery_failed(hdev, status);
1079 			hci_dev_unlock(hdev);
1080 			return;
1081 		}
1082 
1083 		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1084 
1085 		cancel_delayed_work_sync(&hdev->adv_work);
1086 
1087 		hci_dev_lock(hdev);
1088 		hci_adv_entries_clear(hdev);
1089 		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1090 		hci_dev_unlock(hdev);
1091 		break;
1092 
1093 	case LE_SCANNING_DISABLED:
1094 		if (status)
1095 			return;
1096 
1097 		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1098 
1099 		schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1100 
1101 		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
1102 			mgmt_interleaved_discovery(hdev);
1103 		} else {
1104 			hci_dev_lock(hdev);
1105 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1106 			hci_dev_unlock(hdev);
1107 		}
1108 
1109 		break;
1110 
1111 	default:
1112 		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1113 		break;
1114 	}
1115 }
1116 
1117 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1118 {
1119 	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1120 
1121 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1122 
1123 	if (rp->status)
1124 		return;
1125 
1126 	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1127 }
1128 
1129 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1130 {
1131 	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1132 
1133 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1134 
1135 	if (rp->status)
1136 		return;
1137 
1138 	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1139 }
1140 
1141 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1142 							struct sk_buff *skb)
1143 {
1144 	struct hci_cp_write_le_host_supported *sent;
1145 	__u8 status = *((__u8 *) skb->data);
1146 
1147 	BT_DBG("%s status 0x%x", hdev->name, status);
1148 
1149 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1150 	if (!sent)
1151 		return;
1152 
1153 	if (!status) {
1154 		if (sent->le)
1155 			hdev->host_features[0] |= LMP_HOST_LE;
1156 		else
1157 			hdev->host_features[0] &= ~LMP_HOST_LE;
1158 	}
1159 
1160 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1161 					!test_bit(HCI_INIT, &hdev->flags))
1162 		mgmt_le_enable_complete(hdev, sent->le, status);
1163 
1164 	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1165 }
1166 
1167 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1168 {
1169 	BT_DBG("%s status 0x%x", hdev->name, status);
1170 
1171 	if (status) {
1172 		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1173 		hci_conn_check_pending(hdev);
1174 		hci_dev_lock(hdev);
1175 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
1176 			mgmt_start_discovery_failed(hdev, status);
1177 		hci_dev_unlock(hdev);
1178 		return;
1179 	}
1180 
1181 	set_bit(HCI_INQUIRY, &hdev->flags);
1182 
1183 	hci_dev_lock(hdev);
1184 	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1185 	hci_dev_unlock(hdev);
1186 }
1187 
1188 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1189 {
1190 	struct hci_cp_create_conn *cp;
1191 	struct hci_conn *conn;
1192 
1193 	BT_DBG("%s status 0x%x", hdev->name, status);
1194 
1195 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1196 	if (!cp)
1197 		return;
1198 
1199 	hci_dev_lock(hdev);
1200 
1201 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1202 
1203 	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1204 
1205 	if (status) {
1206 		if (conn && conn->state == BT_CONNECT) {
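			/* Status 0x0c (Command Disallowed) usually means the
			 * controller is still busy with a previous request;
			 * park the connection in BT_CONNECT2 so it can be
			 * retried (at most twice) instead of tearing it
			 * down. */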
1207 			if (status != 0x0c || conn->attempt > 2) {
1208 				conn->state = BT_CLOSED;
1209 				hci_proto_connect_cfm(conn, status);
1210 				hci_conn_del(conn);
1211 			} else
1212 				conn->state = BT_CONNECT2;
1213 		}
1214 	} else {
1215 		if (!conn) {
1216 			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1217 			if (conn) {
1218 				conn->out = true;
1219 				conn->link_mode |= HCI_LM_MASTER;
1220 			} else
1221 				BT_ERR("No memory for new connection");
1222 		}
1223 	}
1224 
1225 	hci_dev_unlock(hdev);
1226 }
1227 
1228 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1229 {
1230 	struct hci_cp_add_sco *cp;
1231 	struct hci_conn *acl, *sco;
1232 	__u16 handle;
1233 
1234 	BT_DBG("%s status 0x%x", hdev->name, status);
1235 
1236 	if (!status)
1237 		return;
1238 
1239 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1240 	if (!cp)
1241 		return;
1242 
1243 	handle = __le16_to_cpu(cp->handle);
1244 
1245 	BT_DBG("%s handle %d", hdev->name, handle);
1246 
1247 	hci_dev_lock(hdev);
1248 
1249 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1250 	if (acl) {
1251 		sco = acl->link;
1252 		if (sco) {
1253 			sco->state = BT_CLOSED;
1254 
1255 			hci_proto_connect_cfm(sco, status);
1256 			hci_conn_del(sco);
1257 		}
1258 	}
1259 
1260 	hci_dev_unlock(hdev);
1261 }
1262 
1263 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1264 {
1265 	struct hci_cp_auth_requested *cp;
1266 	struct hci_conn *conn;
1267 
1268 	BT_DBG("%s status 0x%x", hdev->name, status);
1269 
1270 	if (!status)
1271 		return;
1272 
1273 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1274 	if (!cp)
1275 		return;
1276 
1277 	hci_dev_lock(hdev);
1278 
1279 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1280 	if (conn) {
1281 		if (conn->state == BT_CONFIG) {
1282 			hci_proto_connect_cfm(conn, status);
1283 			hci_conn_put(conn);
1284 		}
1285 	}
1286 
1287 	hci_dev_unlock(hdev);
1288 }
1289 
1290 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1291 {
1292 	struct hci_cp_set_conn_encrypt *cp;
1293 	struct hci_conn *conn;
1294 
1295 	BT_DBG("%s status 0x%x", hdev->name, status);
1296 
1297 	if (!status)
1298 		return;
1299 
1300 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1301 	if (!cp)
1302 		return;
1303 
1304 	hci_dev_lock(hdev);
1305 
1306 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1307 	if (conn) {
1308 		if (conn->state == BT_CONFIG) {
1309 			hci_proto_connect_cfm(conn, status);
1310 			hci_conn_put(conn);
1311 		}
1312 	}
1313 
1314 	hci_dev_unlock(hdev);
1315 }
1316 
1317 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1318 							struct hci_conn *conn)
1319 {
1320 	if (conn->state != BT_CONFIG || !conn->out)
1321 		return 0;
1322 
1323 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1324 		return 0;
1325 
1326 	/* Only request authentication for SSP connections, for non-SSP devices
1327 	 * with sec_level HIGH, or when MITM protection is requested (auth_type bit 0) */
1328 	if (!hci_conn_ssp_enabled(conn) &&
1329 				conn->pending_sec_level != BT_SECURITY_HIGH &&
1330 				!(conn->auth_type & 0x01))
1331 		return 0;
1332 
1333 	return 1;
1334 }
1335 
1336 static inline int hci_resolve_name(struct hci_dev *hdev,
1337 				   struct inquiry_entry *e)
1338 {
1339 	struct hci_cp_remote_name_req cp;
1340 
1341 	memset(&cp, 0, sizeof(cp));
1342 
1343 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1344 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1345 	cp.pscan_mode = e->data.pscan_mode;
1346 	cp.clock_offset = e->data.clock_offset;
1347 
1348 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1349 }
1350 
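/* Pick the next inquiry cache entry that still needs its name resolved
 * and fire off a Remote Name Request for it; returns true if a request
 * was successfully queued. */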
1351 static bool hci_resolve_next_name(struct hci_dev *hdev)
1352 {
1353 	struct discovery_state *discov = &hdev->discovery;
1354 	struct inquiry_entry *e;
1355 
1356 	if (list_empty(&discov->resolve))
1357 		return false;
1358 
1359 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1360 	if (!e)
1361 		return false;
1362 
1363 	if (hci_resolve_name(hdev, e) == 0) {
1364 		e->name_state = NAME_PENDING;
1365 		return true;
1366 	}
1367 
1368 	return false;
1369 }
1370 
1371 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1372 				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
1373 {
1374 	struct discovery_state *discov = &hdev->discovery;
1375 	struct inquiry_entry *e;
1376 
1377 	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1378 		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1379 				      name_len, conn->dev_class);
1380 
1381 	if (discov->state == DISCOVERY_STOPPED)
1382 		return;
1383 
1384 	if (discov->state == DISCOVERY_STOPPING)
1385 		goto discov_complete;
1386 
1387 	if (discov->state != DISCOVERY_RESOLVING)
1388 		return;
1389 
1390 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1391 	/* If the device was not found in the list of devices whose names are
1392 	 * pending resolution, there is no need to continue resolving the next
1393 	 * name; that will be done when another Remote Name Request Complete
1394 	 * event is received */
1395 	if (!e)
1396 		return;
1397 
1398 	list_del(&e->list);
1399 	if (name) {
1400 		e->name_state = NAME_KNOWN;
1401 		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1402 				 e->data.rssi, name, name_len);
1403 	} else {
1404 		e->name_state = NAME_NOT_KNOWN;
1405 	}
1406 
1407 	if (hci_resolve_next_name(hdev))
1408 		return;
1409 
1410 discov_complete:
1411 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1412 }
1413 
1414 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1415 {
1416 	struct hci_cp_remote_name_req *cp;
1417 	struct hci_conn *conn;
1418 
1419 	BT_DBG("%s status 0x%x", hdev->name, status);
1420 
1421 	/* If successful, wait for the Remote Name Request Complete event
1422 	 * before checking whether authentication is needed */
1423 	if (!status)
1424 		return;
1425 
1426 	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1427 	if (!cp)
1428 		return;
1429 
1430 	hci_dev_lock(hdev);
1431 
1432 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1433 
1434 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1435 		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1436 
1437 	if (!conn)
1438 		goto unlock;
1439 
1440 	if (!hci_outgoing_auth_needed(hdev, conn))
1441 		goto unlock;
1442 
1443 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1444 		struct hci_cp_auth_requested cp;
1445 		cp.handle = __cpu_to_le16(conn->handle);
1446 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1447 	}
1448 
1449 unlock:
1450 	hci_dev_unlock(hdev);
1451 }
1452 
1453 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1454 {
1455 	struct hci_cp_read_remote_features *cp;
1456 	struct hci_conn *conn;
1457 
1458 	BT_DBG("%s status 0x%x", hdev->name, status);
1459 
1460 	if (!status)
1461 		return;
1462 
1463 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1464 	if (!cp)
1465 		return;
1466 
1467 	hci_dev_lock(hdev);
1468 
1469 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1470 	if (conn) {
1471 		if (conn->state == BT_CONFIG) {
1472 			hci_proto_connect_cfm(conn, status);
1473 			hci_conn_put(conn);
1474 		}
1475 	}
1476 
1477 	hci_dev_unlock(hdev);
1478 }
1479 
1480 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1481 {
1482 	struct hci_cp_read_remote_ext_features *cp;
1483 	struct hci_conn *conn;
1484 
1485 	BT_DBG("%s status 0x%x", hdev->name, status);
1486 
1487 	if (!status)
1488 		return;
1489 
1490 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1491 	if (!cp)
1492 		return;
1493 
1494 	hci_dev_lock(hdev);
1495 
1496 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1497 	if (conn) {
1498 		if (conn->state == BT_CONFIG) {
1499 			hci_proto_connect_cfm(conn, status);
1500 			hci_conn_put(conn);
1501 		}
1502 	}
1503 
1504 	hci_dev_unlock(hdev);
1505 }
1506 
1507 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1508 {
1509 	struct hci_cp_setup_sync_conn *cp;
1510 	struct hci_conn *acl, *sco;
1511 	__u16 handle;
1512 
1513 	BT_DBG("%s status 0x%x", hdev->name, status);
1514 
1515 	if (!status)
1516 		return;
1517 
1518 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1519 	if (!cp)
1520 		return;
1521 
1522 	handle = __le16_to_cpu(cp->handle);
1523 
1524 	BT_DBG("%s handle %d", hdev->name, handle);
1525 
1526 	hci_dev_lock(hdev);
1527 
1528 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1529 	if (acl) {
1530 		sco = acl->link;
1531 		if (sco) {
1532 			sco->state = BT_CLOSED;
1533 
1534 			hci_proto_connect_cfm(sco, status);
1535 			hci_conn_del(sco);
1536 		}
1537 	}
1538 
1539 	hci_dev_unlock(hdev);
1540 }
1541 
1542 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1543 {
1544 	struct hci_cp_sniff_mode *cp;
1545 	struct hci_conn *conn;
1546 
1547 	BT_DBG("%s status 0x%x", hdev->name, status);
1548 
1549 	if (!status)
1550 		return;
1551 
1552 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1553 	if (!cp)
1554 		return;
1555 
1556 	hci_dev_lock(hdev);
1557 
1558 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1559 	if (conn) {
1560 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1561 
1562 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1563 			hci_sco_setup(conn, status);
1564 	}
1565 
1566 	hci_dev_unlock(hdev);
1567 }
1568 
1569 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1570 {
1571 	struct hci_cp_exit_sniff_mode *cp;
1572 	struct hci_conn *conn;
1573 
1574 	BT_DBG("%s status 0x%x", hdev->name, status);
1575 
1576 	if (!status)
1577 		return;
1578 
1579 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1580 	if (!cp)
1581 		return;
1582 
1583 	hci_dev_lock(hdev);
1584 
1585 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1586 	if (conn) {
1587 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1588 
1589 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1590 			hci_sco_setup(conn, status);
1591 	}
1592 
1593 	hci_dev_unlock(hdev);
1594 }
1595 
1596 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1597 {
1598 	struct hci_cp_disconnect *cp;
1599 	struct hci_conn *conn;
1600 
1601 	if (!status)
1602 		return;
1603 
1604 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1605 	if (!cp)
1606 		return;
1607 
1608 	hci_dev_lock(hdev);
1609 
1610 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1611 	if (conn)
1612 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1613 				       conn->dst_type, status);
1614 
1615 	hci_dev_unlock(hdev);
1616 }
1617 
1618 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1619 {
1620 	struct hci_cp_le_create_conn *cp;
1621 	struct hci_conn *conn;
1622 
1623 	BT_DBG("%s status 0x%x", hdev->name, status);
1624 
1625 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1626 	if (!cp)
1627 		return;
1628 
1629 	hci_dev_lock(hdev);
1630 
1631 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1632 
1633 	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1634 		conn);
1635 
1636 	if (status) {
1637 		if (conn && conn->state == BT_CONNECT) {
1638 			conn->state = BT_CLOSED;
1639 			hci_proto_connect_cfm(conn, status);
1640 			hci_conn_del(conn);
1641 		}
1642 	} else {
1643 		if (!conn) {
1644 			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1645 			if (conn) {
1646 				conn->dst_type = cp->peer_addr_type;
1647 				conn->out = true;
1648 			} else {
1649 				BT_ERR("No memory for new connection");
1650 			}
1651 		}
1652 	}
1653 
1654 	hci_dev_unlock(hdev);
1655 }
1656 
1657 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1658 {
1659 	BT_DBG("%s status 0x%x", hdev->name, status);
1660 }
1661 
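/* Inquiry Complete: depending on the discovery state, either finish the
 * mgmt discovery procedure or start resolving the names of the devices
 * found during inquiry. */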
1662 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1663 {
1664 	__u8 status = *((__u8 *) skb->data);
1665 	struct discovery_state *discov = &hdev->discovery;
1666 	struct inquiry_entry *e;
1667 
1668 	BT_DBG("%s status %d", hdev->name, status);
1669 
1670 	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1671 
1672 	hci_conn_check_pending(hdev);
1673 
1674 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1675 		return;
1676 
1677 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1678 		return;
1679 
1680 	hci_dev_lock(hdev);
1681 
1682 	if (discov->state != DISCOVERY_FINDING)
1683 		goto unlock;
1684 
1685 	if (list_empty(&discov->resolve)) {
1686 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1687 		goto unlock;
1688 	}
1689 
1690 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1691 	if (e && hci_resolve_name(hdev, e) == 0) {
1692 		e->name_state = NAME_PENDING;
1693 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1694 	} else {
1695 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1696 	}
1697 
1698 unlock:
1699 	hci_dev_unlock(hdev);
1700 }
1701 
1702 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1703 {
1704 	struct inquiry_data data;
1705 	struct inquiry_info *info = (void *) (skb->data + 1);
1706 	int num_rsp = *((__u8 *) skb->data);
1707 
1708 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1709 
1710 	if (!num_rsp)
1711 		return;
1712 
1713 	hci_dev_lock(hdev);
1714 
1715 	for (; num_rsp; num_rsp--, info++) {
1716 		bool name_known, ssp;
1717 
1718 		bacpy(&data.bdaddr, &info->bdaddr);
1719 		data.pscan_rep_mode	= info->pscan_rep_mode;
1720 		data.pscan_period_mode	= info->pscan_period_mode;
1721 		data.pscan_mode		= info->pscan_mode;
1722 		memcpy(data.dev_class, info->dev_class, 3);
1723 		data.clock_offset	= info->clock_offset;
1724 		data.rssi		= 0x00;
1725 		data.ssp_mode		= 0x00;
1726 
1727 		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1728 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1729 				  info->dev_class, 0, !name_known, ssp, NULL,
1730 				  0);
1731 	}
1732 
1733 	hci_dev_unlock(hdev);
1734 }
1735 
1736 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1737 {
1738 	struct hci_ev_conn_complete *ev = (void *) skb->data;
1739 	struct hci_conn *conn;
1740 
1741 	BT_DBG("%s", hdev->name);
1742 
1743 	hci_dev_lock(hdev);
1744 
1745 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1746 	if (!conn) {
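		/* A connection set up as eSCO may be reported back by the
		 * controller as a plain SCO link, so fall back to the eSCO
		 * entry for this address and fix up its type. */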
1747 		if (ev->link_type != SCO_LINK)
1748 			goto unlock;
1749 
1750 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1751 		if (!conn)
1752 			goto unlock;
1753 
1754 		conn->type = SCO_LINK;
1755 	}
1756 
1757 	if (!ev->status) {
1758 		conn->handle = __le16_to_cpu(ev->handle);
1759 
1760 		if (conn->type == ACL_LINK) {
1761 			conn->state = BT_CONFIG;
1762 			hci_conn_hold(conn);
1763 
1764 			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1765 			    !hci_find_link_key(hdev, &ev->bdaddr))
1766 				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1767 			else
1768 				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1769 		} else
1770 			conn->state = BT_CONNECTED;
1771 
1772 		hci_conn_hold_device(conn);
1773 		hci_conn_add_sysfs(conn);
1774 
1775 		if (test_bit(HCI_AUTH, &hdev->flags))
1776 			conn->link_mode |= HCI_LM_AUTH;
1777 
1778 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
1779 			conn->link_mode |= HCI_LM_ENCRYPT;
1780 
1781 		/* Get remote features */
1782 		if (conn->type == ACL_LINK) {
1783 			struct hci_cp_read_remote_features cp;
1784 			cp.handle = ev->handle;
1785 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1786 				     sizeof(cp), &cp);
1787 		}
1788 
1789 		/* Set packet type for incoming connection */
1790 		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1791 			struct hci_cp_change_conn_ptype cp;
1792 			cp.handle = ev->handle;
1793 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1794 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1795 				     &cp);
1796 		}
1797 	} else {
1798 		conn->state = BT_CLOSED;
1799 		if (conn->type == ACL_LINK)
1800 			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1801 					    conn->dst_type, ev->status);
1802 	}
1803 
1804 	if (conn->type == ACL_LINK)
1805 		hci_sco_setup(conn, ev->status);
1806 
1807 	if (ev->status) {
1808 		hci_proto_connect_cfm(conn, ev->status);
1809 		hci_conn_del(conn);
1810 	} else if (ev->link_type != ACL_LINK)
1811 		hci_proto_connect_cfm(conn, ev->status);
1812 
1813 unlock:
1814 	hci_dev_unlock(hdev);
1815 
1816 	hci_conn_check_pending(hdev);
1817 }
1818 
1819 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1820 {
1821 	struct hci_ev_conn_request *ev = (void *) skb->data;
1822 	int mask = hdev->link_mode;
1823 
1824 	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1825 					batostr(&ev->bdaddr), ev->link_type);
1826 
1827 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1828 
1829 	if ((mask & HCI_LM_ACCEPT) &&
1830 			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1831 		/* Connection accepted */
1832 		struct inquiry_entry *ie;
1833 		struct hci_conn *conn;
1834 
1835 		hci_dev_lock(hdev);
1836 
1837 		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1838 		if (ie)
1839 			memcpy(ie->data.dev_class, ev->dev_class, 3);
1840 
1841 		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1842 		if (!conn) {
1843 			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1844 			if (!conn) {
1845 				BT_ERR("No memory for new connection");
1846 				hci_dev_unlock(hdev);
1847 				return;
1848 			}
1849 		}
1850 
1851 		memcpy(conn->dev_class, ev->dev_class, 3);
1852 		conn->state = BT_CONNECT;
1853 
1854 		hci_dev_unlock(hdev);
1855 
1856 		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1857 			struct hci_cp_accept_conn_req cp;
1858 
1859 			bacpy(&cp.bdaddr, &ev->bdaddr);
1860 
1861 			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1862 				cp.role = 0x00; /* Become master */
1863 			else
1864 				cp.role = 0x01; /* Remain slave */
1865 
1866 			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1867 				     &cp);
1868 		} else {
1869 			struct hci_cp_accept_sync_conn_req cp;
1870 
1871 			bacpy(&cp.bdaddr, &ev->bdaddr);
1872 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1873 
1874 			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
1875 			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
1876 			cp.max_latency    = cpu_to_le16(0xffff);
1877 			cp.content_format = cpu_to_le16(hdev->voice_setting);
1878 			cp.retrans_effort = 0xff;
1879 
1880 			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1881 				     sizeof(cp), &cp);
1882 		}
1883 	} else {
1884 		/* Connection rejected */
1885 		struct hci_cp_reject_conn_req cp;
1886 
1887 		bacpy(&cp.bdaddr, &ev->bdaddr);
1888 		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1889 		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1890 	}
1891 }
1892 
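/* Disconnection Complete event: mark the connection closed, notify the
 * management interface (disconnected vs. disconnect-failed) and, on success,
 * drop the link key if requested and tear the connection down. */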
1893 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1894 {
1895 	struct hci_ev_disconn_complete *ev = (void *) skb->data;
1896 	struct hci_conn *conn;
1897 
1898 	BT_DBG("%s status %d", hdev->name, ev->status);
1899 
1900 	hci_dev_lock(hdev);
1901 
1902 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1903 	if (!conn)
1904 		goto unlock;
1905 
1906 	if (ev->status == 0)
1907 		conn->state = BT_CLOSED;
1908 
1909 	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1910 			(conn->type == ACL_LINK || conn->type == LE_LINK)) {
1911 		if (ev->status != 0)
1912 			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1913 						conn->dst_type, ev->status);
1914 		else
1915 			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1916 						 conn->dst_type);
1917 	}
1918 
1919 	if (ev->status == 0) {
1920 		if (conn->type == ACL_LINK && conn->flush_key)
1921 			hci_remove_link_key(hdev, &conn->dst);
1922 		hci_proto_disconn_cfm(conn, ev->reason);
1923 		hci_conn_del(conn);
1924 	}
1925 
1926 unlock:
1927 	hci_dev_unlock(hdev);
1928 }
1929 
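/* Authentication Complete event: on success update the link mode and security
 * level (a pending re-authentication of a legacy, non-SSP peer is only
 * logged), then either continue with encryption setup or confirm the result
 * to the upper layers. */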
1930 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1931 {
1932 	struct hci_ev_auth_complete *ev = (void *) skb->data;
1933 	struct hci_conn *conn;
1934 
1935 	BT_DBG("%s status %d", hdev->name, ev->status);
1936 
1937 	hci_dev_lock(hdev);
1938 
1939 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1940 	if (!conn)
1941 		goto unlock;
1942 
1943 	if (!ev->status) {
1944 		if (!hci_conn_ssp_enabled(conn) &&
1945 				test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1946 			BT_INFO("re-auth of legacy device is not possible.");
1947 		} else {
1948 			conn->link_mode |= HCI_LM_AUTH;
1949 			conn->sec_level = conn->pending_sec_level;
1950 		}
1951 	} else {
1952 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1953 				 ev->status);
1954 	}
1955 
1956 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1957 	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1958 
1959 	if (conn->state == BT_CONFIG) {
1960 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
1961 			struct hci_cp_set_conn_encrypt cp;
1962 			cp.handle  = ev->handle;
1963 			cp.encrypt = 0x01;
1964 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1965 									&cp);
1966 		} else {
1967 			conn->state = BT_CONNECTED;
1968 			hci_proto_connect_cfm(conn, ev->status);
1969 			hci_conn_put(conn);
1970 		}
1971 	} else {
1972 		hci_auth_cfm(conn, ev->status);
1973 
1974 		hci_conn_hold(conn);
1975 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1976 		hci_conn_put(conn);
1977 	}
1978 
1979 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1980 		if (!ev->status) {
1981 			struct hci_cp_set_conn_encrypt cp;
1982 			cp.handle  = ev->handle;
1983 			cp.encrypt = 0x01;
1984 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1985 									&cp);
1986 		} else {
1987 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1988 			hci_encrypt_cfm(conn, ev->status, 0x00);
1989 		}
1990 	}
1991 
1992 unlock:
1993 	hci_dev_unlock(hdev);
1994 }
1995 
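/* Remote Name Request Complete event: hand the resolved name (or the failure)
 * to the management code when HCI_MGMT is set, then start authentication on
 * the connection if an outgoing request still needs it. */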
1996 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1997 {
1998 	struct hci_ev_remote_name *ev = (void *) skb->data;
1999 	struct hci_conn *conn;
2000 
2001 	BT_DBG("%s", hdev->name);
2002 
2003 	hci_conn_check_pending(hdev);
2004 
2005 	hci_dev_lock(hdev);
2006 
2007 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2008 
2009 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2010 		goto check_auth;
2011 
2012 	if (ev->status == 0)
2013 		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2014 				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2015 	else
2016 		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2017 
2018 check_auth:
2019 	if (!conn)
2020 		goto unlock;
2021 
2022 	if (!hci_outgoing_auth_needed(hdev, conn))
2023 		goto unlock;
2024 
2025 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2026 		struct hci_cp_auth_requested cp;
2027 		cp.handle = __cpu_to_le16(conn->handle);
2028 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2029 	}
2030 
2031 unlock:
2032 	hci_dev_unlock(hdev);
2033 }
2034 
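/* Encryption Change event: update the link mode bits (encryption implies
 * authentication), disconnect on failure for established links, and complete
 * any pending connection or encryption confirmations. */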
2035 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2036 {
2037 	struct hci_ev_encrypt_change *ev = (void *) skb->data;
2038 	struct hci_conn *conn;
2039 
2040 	BT_DBG("%s status %d", hdev->name, ev->status);
2041 
2042 	hci_dev_lock(hdev);
2043 
2044 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2045 	if (conn) {
2046 		if (!ev->status) {
2047 			if (ev->encrypt) {
2048 				/* Encryption implies authentication */
2049 				conn->link_mode |= HCI_LM_AUTH;
2050 				conn->link_mode |= HCI_LM_ENCRYPT;
2051 				conn->sec_level = conn->pending_sec_level;
2052 			} else
2053 				conn->link_mode &= ~HCI_LM_ENCRYPT;
2054 		}
2055 
2056 		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2057 
2058 		if (ev->status && conn->state == BT_CONNECTED) {
2059 			hci_acl_disconn(conn, 0x13);
2060 			hci_conn_put(conn);
2061 			goto unlock;
2062 		}
2063 
2064 		if (conn->state == BT_CONFIG) {
2065 			if (!ev->status)
2066 				conn->state = BT_CONNECTED;
2067 
2068 			hci_proto_connect_cfm(conn, ev->status);
2069 			hci_conn_put(conn);
2070 		} else
2071 			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2072 	}
2073 
2074 unlock:
2075 	hci_dev_unlock(hdev);
2076 }
2077 
2078 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2079 {
2080 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2081 	struct hci_conn *conn;
2082 
2083 	BT_DBG("%s status %d", hdev->name, ev->status);
2084 
2085 	hci_dev_lock(hdev);
2086 
2087 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2088 	if (conn) {
2089 		if (!ev->status)
2090 			conn->link_mode |= HCI_LM_SECURE;
2091 
2092 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2093 
2094 		hci_key_change_cfm(conn, ev->status);
2095 	}
2096 
2097 	hci_dev_unlock(hdev);
2098 }
2099 
2100 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2101 {
2102 	struct hci_ev_remote_features *ev = (void *) skb->data;
2103 	struct hci_conn *conn;
2104 
2105 	BT_DBG("%s status %d", hdev->name, ev->status);
2106 
2107 	hci_dev_lock(hdev);
2108 
2109 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2110 	if (!conn)
2111 		goto unlock;
2112 
2113 	if (!ev->status)
2114 		memcpy(conn->features, ev->features, 8);
2115 
2116 	if (conn->state != BT_CONFIG)
2117 		goto unlock;
2118 
2119 	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2120 		struct hci_cp_read_remote_ext_features cp;
2121 		cp.handle = ev->handle;
2122 		cp.page = 0x01;
2123 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2124 							sizeof(cp), &cp);
2125 		goto unlock;
2126 	}
2127 
2128 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2129 		struct hci_cp_remote_name_req cp;
2130 		memset(&cp, 0, sizeof(cp));
2131 		bacpy(&cp.bdaddr, &conn->dst);
2132 		cp.pscan_rep_mode = 0x02;
2133 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2134 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2135 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2136 				      conn->dst_type, 0, NULL, 0,
2137 				      conn->dev_class);
2138 
2139 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2140 		conn->state = BT_CONNECTED;
2141 		hci_proto_connect_cfm(conn, ev->status);
2142 		hci_conn_put(conn);
2143 	}
2144 
2145 unlock:
2146 	hci_dev_unlock(hdev);
2147 }
2148 
2149 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2150 {
2151 	BT_DBG("%s", hdev->name);
2152 }
2153 
2154 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2155 {
2156 	BT_DBG("%s", hdev->name);
2157 }
2158 
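/* Command Complete event: dispatch to the per-opcode hci_cc_* handler, stop
 * the command timeout timer and, if the controller advertises free command
 * slots, restart the command queue worker. */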
2159 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2160 {
2161 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
2162 	__u16 opcode;
2163 
2164 	skb_pull(skb, sizeof(*ev));
2165 
2166 	opcode = __le16_to_cpu(ev->opcode);
2167 
2168 	switch (opcode) {
2169 	case HCI_OP_INQUIRY_CANCEL:
2170 		hci_cc_inquiry_cancel(hdev, skb);
2171 		break;
2172 
2173 	case HCI_OP_EXIT_PERIODIC_INQ:
2174 		hci_cc_exit_periodic_inq(hdev, skb);
2175 		break;
2176 
2177 	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2178 		hci_cc_remote_name_req_cancel(hdev, skb);
2179 		break;
2180 
2181 	case HCI_OP_ROLE_DISCOVERY:
2182 		hci_cc_role_discovery(hdev, skb);
2183 		break;
2184 
2185 	case HCI_OP_READ_LINK_POLICY:
2186 		hci_cc_read_link_policy(hdev, skb);
2187 		break;
2188 
2189 	case HCI_OP_WRITE_LINK_POLICY:
2190 		hci_cc_write_link_policy(hdev, skb);
2191 		break;
2192 
2193 	case HCI_OP_READ_DEF_LINK_POLICY:
2194 		hci_cc_read_def_link_policy(hdev, skb);
2195 		break;
2196 
2197 	case HCI_OP_WRITE_DEF_LINK_POLICY:
2198 		hci_cc_write_def_link_policy(hdev, skb);
2199 		break;
2200 
2201 	case HCI_OP_RESET:
2202 		hci_cc_reset(hdev, skb);
2203 		break;
2204 
2205 	case HCI_OP_WRITE_LOCAL_NAME:
2206 		hci_cc_write_local_name(hdev, skb);
2207 		break;
2208 
2209 	case HCI_OP_READ_LOCAL_NAME:
2210 		hci_cc_read_local_name(hdev, skb);
2211 		break;
2212 
2213 	case HCI_OP_WRITE_AUTH_ENABLE:
2214 		hci_cc_write_auth_enable(hdev, skb);
2215 		break;
2216 
2217 	case HCI_OP_WRITE_ENCRYPT_MODE:
2218 		hci_cc_write_encrypt_mode(hdev, skb);
2219 		break;
2220 
2221 	case HCI_OP_WRITE_SCAN_ENABLE:
2222 		hci_cc_write_scan_enable(hdev, skb);
2223 		break;
2224 
2225 	case HCI_OP_READ_CLASS_OF_DEV:
2226 		hci_cc_read_class_of_dev(hdev, skb);
2227 		break;
2228 
2229 	case HCI_OP_WRITE_CLASS_OF_DEV:
2230 		hci_cc_write_class_of_dev(hdev, skb);
2231 		break;
2232 
2233 	case HCI_OP_READ_VOICE_SETTING:
2234 		hci_cc_read_voice_setting(hdev, skb);
2235 		break;
2236 
2237 	case HCI_OP_WRITE_VOICE_SETTING:
2238 		hci_cc_write_voice_setting(hdev, skb);
2239 		break;
2240 
2241 	case HCI_OP_HOST_BUFFER_SIZE:
2242 		hci_cc_host_buffer_size(hdev, skb);
2243 		break;
2244 
2245 	case HCI_OP_WRITE_SSP_MODE:
2246 		hci_cc_write_ssp_mode(hdev, skb);
2247 		break;
2248 
2249 	case HCI_OP_READ_LOCAL_VERSION:
2250 		hci_cc_read_local_version(hdev, skb);
2251 		break;
2252 
2253 	case HCI_OP_READ_LOCAL_COMMANDS:
2254 		hci_cc_read_local_commands(hdev, skb);
2255 		break;
2256 
2257 	case HCI_OP_READ_LOCAL_FEATURES:
2258 		hci_cc_read_local_features(hdev, skb);
2259 		break;
2260 
2261 	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2262 		hci_cc_read_local_ext_features(hdev, skb);
2263 		break;
2264 
2265 	case HCI_OP_READ_BUFFER_SIZE:
2266 		hci_cc_read_buffer_size(hdev, skb);
2267 		break;
2268 
2269 	case HCI_OP_READ_BD_ADDR:
2270 		hci_cc_read_bd_addr(hdev, skb);
2271 		break;
2272 
2273 	case HCI_OP_READ_DATA_BLOCK_SIZE:
2274 		hci_cc_read_data_block_size(hdev, skb);
2275 		break;
2276 
2277 	case HCI_OP_WRITE_CA_TIMEOUT:
2278 		hci_cc_write_ca_timeout(hdev, skb);
2279 		break;
2280 
2281 	case HCI_OP_READ_FLOW_CONTROL_MODE:
2282 		hci_cc_read_flow_control_mode(hdev, skb);
2283 		break;
2284 
2285 	case HCI_OP_READ_LOCAL_AMP_INFO:
2286 		hci_cc_read_local_amp_info(hdev, skb);
2287 		break;
2288 
2289 	case HCI_OP_DELETE_STORED_LINK_KEY:
2290 		hci_cc_delete_stored_link_key(hdev, skb);
2291 		break;
2292 
2293 	case HCI_OP_SET_EVENT_MASK:
2294 		hci_cc_set_event_mask(hdev, skb);
2295 		break;
2296 
2297 	case HCI_OP_WRITE_INQUIRY_MODE:
2298 		hci_cc_write_inquiry_mode(hdev, skb);
2299 		break;
2300 
2301 	case HCI_OP_READ_INQ_RSP_TX_POWER:
2302 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2303 		break;
2304 
2305 	case HCI_OP_SET_EVENT_FLT:
2306 		hci_cc_set_event_flt(hdev, skb);
2307 		break;
2308 
2309 	case HCI_OP_PIN_CODE_REPLY:
2310 		hci_cc_pin_code_reply(hdev, skb);
2311 		break;
2312 
2313 	case HCI_OP_PIN_CODE_NEG_REPLY:
2314 		hci_cc_pin_code_neg_reply(hdev, skb);
2315 		break;
2316 
2317 	case HCI_OP_READ_LOCAL_OOB_DATA:
2318 		hci_cc_read_local_oob_data_reply(hdev, skb);
2319 		break;
2320 
2321 	case HCI_OP_LE_READ_BUFFER_SIZE:
2322 		hci_cc_le_read_buffer_size(hdev, skb);
2323 		break;
2324 
2325 	case HCI_OP_USER_CONFIRM_REPLY:
2326 		hci_cc_user_confirm_reply(hdev, skb);
2327 		break;
2328 
2329 	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2330 		hci_cc_user_confirm_neg_reply(hdev, skb);
2331 		break;
2332 
2333 	case HCI_OP_USER_PASSKEY_REPLY:
2334 		hci_cc_user_passkey_reply(hdev, skb);
2335 		break;
2336 
2337 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2338 		hci_cc_user_passkey_neg_reply(hdev, skb);
2339 		break;
2340 
2341 	case HCI_OP_LE_SET_SCAN_PARAM:
2342 		hci_cc_le_set_scan_param(hdev, skb);
2343 		break;
2344 
2345 	case HCI_OP_LE_SET_SCAN_ENABLE:
2346 		hci_cc_le_set_scan_enable(hdev, skb);
2347 		break;
2348 
2349 	case HCI_OP_LE_LTK_REPLY:
2350 		hci_cc_le_ltk_reply(hdev, skb);
2351 		break;
2352 
2353 	case HCI_OP_LE_LTK_NEG_REPLY:
2354 		hci_cc_le_ltk_neg_reply(hdev, skb);
2355 		break;
2356 
2357 	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2358 		hci_cc_write_le_host_supported(hdev, skb);
2359 		break;
2360 
2361 	default:
2362 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2363 		break;
2364 	}
2365 
2366 	if (ev->opcode != HCI_OP_NOP)
2367 		del_timer(&hdev->cmd_timer);
2368 
2369 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2370 		atomic_set(&hdev->cmd_cnt, 1);
2371 		if (!skb_queue_empty(&hdev->cmd_q))
2372 			queue_work(hdev->workqueue, &hdev->cmd_work);
2373 	}
2374 }
2375 
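/* Command Status event: same bookkeeping as Command Complete, but for
 * commands that only report an early status; dispatch to the hci_cs_*
 * handlers. */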
2376 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2377 {
2378 	struct hci_ev_cmd_status *ev = (void *) skb->data;
2379 	__u16 opcode;
2380 
2381 	skb_pull(skb, sizeof(*ev));
2382 
2383 	opcode = __le16_to_cpu(ev->opcode);
2384 
2385 	switch (opcode) {
2386 	case HCI_OP_INQUIRY:
2387 		hci_cs_inquiry(hdev, ev->status);
2388 		break;
2389 
2390 	case HCI_OP_CREATE_CONN:
2391 		hci_cs_create_conn(hdev, ev->status);
2392 		break;
2393 
2394 	case HCI_OP_ADD_SCO:
2395 		hci_cs_add_sco(hdev, ev->status);
2396 		break;
2397 
2398 	case HCI_OP_AUTH_REQUESTED:
2399 		hci_cs_auth_requested(hdev, ev->status);
2400 		break;
2401 
2402 	case HCI_OP_SET_CONN_ENCRYPT:
2403 		hci_cs_set_conn_encrypt(hdev, ev->status);
2404 		break;
2405 
2406 	case HCI_OP_REMOTE_NAME_REQ:
2407 		hci_cs_remote_name_req(hdev, ev->status);
2408 		break;
2409 
2410 	case HCI_OP_READ_REMOTE_FEATURES:
2411 		hci_cs_read_remote_features(hdev, ev->status);
2412 		break;
2413 
2414 	case HCI_OP_READ_REMOTE_EXT_FEATURES:
2415 		hci_cs_read_remote_ext_features(hdev, ev->status);
2416 		break;
2417 
2418 	case HCI_OP_SETUP_SYNC_CONN:
2419 		hci_cs_setup_sync_conn(hdev, ev->status);
2420 		break;
2421 
2422 	case HCI_OP_SNIFF_MODE:
2423 		hci_cs_sniff_mode(hdev, ev->status);
2424 		break;
2425 
2426 	case HCI_OP_EXIT_SNIFF_MODE:
2427 		hci_cs_exit_sniff_mode(hdev, ev->status);
2428 		break;
2429 
2430 	case HCI_OP_DISCONNECT:
2431 		hci_cs_disconnect(hdev, ev->status);
2432 		break;
2433 
2434 	case HCI_OP_LE_CREATE_CONN:
2435 		hci_cs_le_create_conn(hdev, ev->status);
2436 		break;
2437 
2438 	case HCI_OP_LE_START_ENC:
2439 		hci_cs_le_start_enc(hdev, ev->status);
2440 		break;
2441 
2442 	default:
2443 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2444 		break;
2445 	}
2446 
2447 	if (ev->opcode != HCI_OP_NOP)
2448 		del_timer(&hdev->cmd_timer);
2449 
2450 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2451 		atomic_set(&hdev->cmd_cnt, 1);
2452 		if (!skb_queue_empty(&hdev->cmd_q))
2453 			queue_work(hdev->workqueue, &hdev->cmd_work);
2454 	}
2455 }
2456 
2457 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2458 {
2459 	struct hci_ev_role_change *ev = (void *) skb->data;
2460 	struct hci_conn *conn;
2461 
2462 	BT_DBG("%s status %d", hdev->name, ev->status);
2463 
2464 	hci_dev_lock(hdev);
2465 
2466 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2467 	if (conn) {
2468 		if (!ev->status) {
2469 			if (ev->role)
2470 				conn->link_mode &= ~HCI_LM_MASTER;
2471 			else
2472 				conn->link_mode |= HCI_LM_MASTER;
2473 		}
2474 
2475 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2476 
2477 		hci_role_switch_cfm(conn, ev->status, ev->role);
2478 	}
2479 
2480 	hci_dev_unlock(hdev);
2481 }
2482 
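/* Number of Completed Packets event (packet-based flow control): return the
 * completed packets to the per-type quotas (ACL, SCO or LE, with LE falling
 * back to the ACL pool when the controller has no dedicated LE buffers) and
 * reschedule the TX work. */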
2483 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2484 {
2485 	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2486 	int i;
2487 
2488 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2489 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2490 		return;
2491 	}
2492 
2493 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2494 			ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2495 		BT_DBG("%s bad parameters", hdev->name);
2496 		return;
2497 	}
2498 
2499 	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2500 
2501 	for (i = 0; i < ev->num_hndl; i++) {
2502 		struct hci_comp_pkts_info *info = &ev->handles[i];
2503 		struct hci_conn *conn;
2504 		__u16  handle, count;
2505 
2506 		handle = __le16_to_cpu(info->handle);
2507 		count  = __le16_to_cpu(info->count);
2508 
2509 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2510 		if (!conn)
2511 			continue;
2512 
2513 		conn->sent -= count;
2514 
2515 		switch (conn->type) {
2516 		case ACL_LINK:
2517 			hdev->acl_cnt += count;
2518 			if (hdev->acl_cnt > hdev->acl_pkts)
2519 				hdev->acl_cnt = hdev->acl_pkts;
2520 			break;
2521 
2522 		case LE_LINK:
2523 			if (hdev->le_pkts) {
2524 				hdev->le_cnt += count;
2525 				if (hdev->le_cnt > hdev->le_pkts)
2526 					hdev->le_cnt = hdev->le_pkts;
2527 			} else {
2528 				hdev->acl_cnt += count;
2529 				if (hdev->acl_cnt > hdev->acl_pkts)
2530 					hdev->acl_cnt = hdev->acl_pkts;
2531 			}
2532 			break;
2533 
2534 		case SCO_LINK:
2535 			hdev->sco_cnt += count;
2536 			if (hdev->sco_cnt > hdev->sco_pkts)
2537 				hdev->sco_cnt = hdev->sco_pkts;
2538 			break;
2539 
2540 		default:
2541 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2542 			break;
2543 		}
2544 	}
2545 
2546 	queue_work(hdev->workqueue, &hdev->tx_work);
2547 }
2548 
2549 static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2550 					   struct sk_buff *skb)
2551 {
2552 	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2553 	int i;
2554 
2555 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2556 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2557 		return;
2558 	}
2559 
2560 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2561 			ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2562 		BT_DBG("%s bad parameters", hdev->name);
2563 		return;
2564 	}
2565 
2566 	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2567 								ev->num_hndl);
2568 
2569 	for (i = 0; i < ev->num_hndl; i++) {
2570 		struct hci_comp_blocks_info *info = &ev->handles[i];
2571 		struct hci_conn *conn;
2572 		__u16  handle, block_count;
2573 
2574 		handle = __le16_to_cpu(info->handle);
2575 		block_count = __le16_to_cpu(info->blocks);
2576 
2577 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2578 		if (!conn)
2579 			continue;
2580 
2581 		conn->sent -= block_count;
2582 
2583 		switch (conn->type) {
2584 		case ACL_LINK:
2585 			hdev->block_cnt += block_count;
2586 			if (hdev->block_cnt > hdev->num_blocks)
2587 				hdev->block_cnt = hdev->num_blocks;
2588 			break;
2589 
2590 		default:
2591 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2592 			break;
2593 		}
2594 	}
2595 
2596 	queue_work(hdev->workqueue, &hdev->tx_work);
2597 }
2598 
2599 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2600 {
2601 	struct hci_ev_mode_change *ev = (void *) skb->data;
2602 	struct hci_conn *conn;
2603 
2604 	BT_DBG("%s status %d", hdev->name, ev->status);
2605 
2606 	hci_dev_lock(hdev);
2607 
2608 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2609 	if (conn) {
2610 		conn->mode = ev->mode;
2611 		conn->interval = __le16_to_cpu(ev->interval);
2612 
2613 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2614 			if (conn->mode == HCI_CM_ACTIVE)
2615 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2616 			else
2617 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2618 		}
2619 
2620 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2621 			hci_sco_setup(conn, ev->status);
2622 	}
2623 
2624 	hci_dev_unlock(hdev);
2625 }
2626 
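/* PIN Code Request event: extend the disconnect timeout while legacy pairing
 * is in progress, reply negatively when the device is not pairable, or
 * forward the request to user space via the management interface. */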
2627 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2628 {
2629 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
2630 	struct hci_conn *conn;
2631 
2632 	BT_DBG("%s", hdev->name);
2633 
2634 	hci_dev_lock(hdev);
2635 
2636 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2637 	if (!conn)
2638 		goto unlock;
2639 
2640 	if (conn->state == BT_CONNECTED) {
2641 		hci_conn_hold(conn);
2642 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2643 		hci_conn_put(conn);
2644 	}
2645 
2646 	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2647 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2648 					sizeof(ev->bdaddr), &ev->bdaddr);
2649 	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2650 		u8 secure;
2651 
2652 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
2653 			secure = 1;
2654 		else
2655 			secure = 0;
2656 
2657 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2658 	}
2659 
2660 unlock:
2661 	hci_dev_unlock(hdev);
2662 }
2663 
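/* Link Key Request event: look up a stored key for the peer and reply with
 * it, unless the key type is too weak for the pending security level (debug
 * or unauthenticated keys); otherwise send a negative reply. */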
2664 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2665 {
2666 	struct hci_ev_link_key_req *ev = (void *) skb->data;
2667 	struct hci_cp_link_key_reply cp;
2668 	struct hci_conn *conn;
2669 	struct link_key *key;
2670 
2671 	BT_DBG("%s", hdev->name);
2672 
2673 	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2674 		return;
2675 
2676 	hci_dev_lock(hdev);
2677 
2678 	key = hci_find_link_key(hdev, &ev->bdaddr);
2679 	if (!key) {
2680 		BT_DBG("%s link key not found for %s", hdev->name,
2681 							batostr(&ev->bdaddr));
2682 		goto not_found;
2683 	}
2684 
2685 	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2686 							batostr(&ev->bdaddr));
2687 
2688 	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2689 				key->type == HCI_LK_DEBUG_COMBINATION) {
2690 		BT_DBG("%s ignoring debug key", hdev->name);
2691 		goto not_found;
2692 	}
2693 
2694 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2695 	if (conn) {
2696 		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2697 				conn->auth_type != 0xff &&
2698 				(conn->auth_type & 0x01)) {
2699 			BT_DBG("%s ignoring unauthenticated key", hdev->name);
2700 			goto not_found;
2701 		}
2702 
2703 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2704 				conn->pending_sec_level == BT_SECURITY_HIGH) {
2705 			BT_DBG("%s ignoring key unauthenticated for high security",
2706 							hdev->name);
2707 			goto not_found;
2708 		}
2709 
2710 		conn->key_type = key->type;
2711 		conn->pin_length = key->pin_len;
2712 	}
2713 
2714 	bacpy(&cp.bdaddr, &ev->bdaddr);
2715 	memcpy(cp.link_key, key->val, 16);
2716 
2717 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2718 
2719 	hci_dev_unlock(hdev);
2720 
2721 	return;
2722 
2723 not_found:
2724 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2725 	hci_dev_unlock(hdev);
2726 }
2727 
2728 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2729 {
2730 	struct hci_ev_link_key_notify *ev = (void *) skb->data;
2731 	struct hci_conn *conn;
2732 	u8 pin_len = 0;
2733 
2734 	BT_DBG("%s", hdev->name);
2735 
2736 	hci_dev_lock(hdev);
2737 
2738 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2739 	if (conn) {
2740 		hci_conn_hold(conn);
2741 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2742 		pin_len = conn->pin_length;
2743 
2744 		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2745 			conn->key_type = ev->key_type;
2746 
2747 		hci_conn_put(conn);
2748 	}
2749 
2750 	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2751 		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2752 							ev->key_type, pin_len);
2753 
2754 	hci_dev_unlock(hdev);
2755 }
2756 
2757 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2758 {
2759 	struct hci_ev_clock_offset *ev = (void *) skb->data;
2760 	struct hci_conn *conn;
2761 
2762 	BT_DBG("%s status %d", hdev->name, ev->status);
2763 
2764 	hci_dev_lock(hdev);
2765 
2766 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2767 	if (conn && !ev->status) {
2768 		struct inquiry_entry *ie;
2769 
2770 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2771 		if (ie) {
2772 			ie->data.clock_offset = ev->clock_offset;
2773 			ie->timestamp = jiffies;
2774 		}
2775 	}
2776 
2777 	hci_dev_unlock(hdev);
2778 }
2779 
2780 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2781 {
2782 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2783 	struct hci_conn *conn;
2784 
2785 	BT_DBG("%s status %d", hdev->name, ev->status);
2786 
2787 	hci_dev_lock(hdev);
2788 
2789 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2790 	if (conn && !ev->status)
2791 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2792 
2793 	hci_dev_unlock(hdev);
2794 }
2795 
2796 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2797 {
2798 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2799 	struct inquiry_entry *ie;
2800 
2801 	BT_DBG("%s", hdev->name);
2802 
2803 	hci_dev_lock(hdev);
2804 
2805 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2806 	if (ie) {
2807 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2808 		ie->timestamp = jiffies;
2809 	}
2810 
2811 	hci_dev_unlock(hdev);
2812 }
2813 
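/* Inquiry Result with RSSI event: the event comes in two layouts (with or
 * without the legacy pscan_mode field), distinguished here by the
 * per-response size; both variants update the inquiry cache and report the
 * device to the management interface. */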
2814 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2815 {
2816 	struct inquiry_data data;
2817 	int num_rsp = *((__u8 *) skb->data);
2818 	bool name_known, ssp;
2819 
2820 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2821 
2822 	if (!num_rsp)
2823 		return;
2824 
2825 	hci_dev_lock(hdev);
2826 
2827 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2828 		struct inquiry_info_with_rssi_and_pscan_mode *info;
2829 		info = (void *) (skb->data + 1);
2830 
2831 		for (; num_rsp; num_rsp--, info++) {
2832 			bacpy(&data.bdaddr, &info->bdaddr);
2833 			data.pscan_rep_mode	= info->pscan_rep_mode;
2834 			data.pscan_period_mode	= info->pscan_period_mode;
2835 			data.pscan_mode		= info->pscan_mode;
2836 			memcpy(data.dev_class, info->dev_class, 3);
2837 			data.clock_offset	= info->clock_offset;
2838 			data.rssi		= info->rssi;
2839 			data.ssp_mode		= 0x00;
2840 
2841 			name_known = hci_inquiry_cache_update(hdev, &data,
2842 							      false, &ssp);
2843 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2844 					  info->dev_class, info->rssi,
2845 					  !name_known, ssp, NULL, 0);
2846 		}
2847 	} else {
2848 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2849 
2850 		for (; num_rsp; num_rsp--, info++) {
2851 			bacpy(&data.bdaddr, &info->bdaddr);
2852 			data.pscan_rep_mode	= info->pscan_rep_mode;
2853 			data.pscan_period_mode	= info->pscan_period_mode;
2854 			data.pscan_mode		= 0x00;
2855 			memcpy(data.dev_class, info->dev_class, 3);
2856 			data.clock_offset	= info->clock_offset;
2857 			data.rssi		= info->rssi;
2858 			data.ssp_mode		= 0x00;
2859 			name_known = hci_inquiry_cache_update(hdev, &data,
2860 							      false, &ssp);
2861 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2862 					  info->dev_class, info->rssi,
2863 					  !name_known, ssp, NULL, 0);
2864 		}
2865 	}
2866 
2867 	hci_dev_unlock(hdev);
2868 }
2869 
2870 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2871 {
2872 	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2873 	struct hci_conn *conn;
2874 
2875 	BT_DBG("%s", hdev->name);
2876 
2877 	hci_dev_lock(hdev);
2878 
2879 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2880 	if (!conn)
2881 		goto unlock;
2882 
2883 	if (!ev->status && ev->page == 0x01) {
2884 		struct inquiry_entry *ie;
2885 
2886 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2887 		if (ie)
2888 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2889 
2890 		if (ev->features[0] & LMP_HOST_SSP)
2891 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2892 	}
2893 
2894 	if (conn->state != BT_CONFIG)
2895 		goto unlock;
2896 
2897 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2898 		struct hci_cp_remote_name_req cp;
2899 		memset(&cp, 0, sizeof(cp));
2900 		bacpy(&cp.bdaddr, &conn->dst);
2901 		cp.pscan_rep_mode = 0x02;
2902 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2903 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2904 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2905 				      conn->dst_type, 0, NULL, 0,
2906 				      conn->dev_class);
2907 
2908 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2909 		conn->state = BT_CONNECTED;
2910 		hci_proto_connect_cfm(conn, ev->status);
2911 		hci_conn_put(conn);
2912 	}
2913 
2914 unlock:
2915 	hci_dev_unlock(hdev);
2916 }
2917 
2918 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2919 {
2920 	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2921 	struct hci_conn *conn;
2922 
2923 	BT_DBG("%s status %d", hdev->name, ev->status);
2924 
2925 	hci_dev_lock(hdev);
2926 
2927 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2928 	if (!conn) {
2929 		if (ev->link_type == ESCO_LINK)
2930 			goto unlock;
2931 
2932 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2933 		if (!conn)
2934 			goto unlock;
2935 
2936 		conn->type = SCO_LINK;
2937 	}
2938 
2939 	switch (ev->status) {
2940 	case 0x00:
2941 		conn->handle = __le16_to_cpu(ev->handle);
2942 		conn->state  = BT_CONNECTED;
2943 
2944 		hci_conn_hold_device(conn);
2945 		hci_conn_add_sysfs(conn);
2946 		break;
2947 
2948 	case 0x11:	/* Unsupported Feature or Parameter Value */
2949 	case 0x1c:	/* SCO interval rejected */
2950 	case 0x1a:	/* Unsupported Remote Feature */
2951 	case 0x1f:	/* Unspecified error */
2952 		if (conn->out && conn->attempt < 2) {
2953 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2954 					(hdev->esco_type & EDR_ESCO_MASK);
2955 			hci_setup_sync(conn, conn->link->handle);
2956 			goto unlock;
2957 		}
2958 		/* fall through */
2959 
2960 	default:
2961 		conn->state = BT_CLOSED;
2962 		break;
2963 	}
2964 
2965 	hci_proto_connect_cfm(conn, ev->status);
2966 	if (ev->status)
2967 		hci_conn_del(conn);
2968 
2969 unlock:
2970 	hci_dev_unlock(hdev);
2971 }
2972 
2973 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2974 {
2975 	BT_DBG("%s", hdev->name);
2976 }
2977 
2978 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2979 {
2980 	struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2981 
2982 	BT_DBG("%s status %d", hdev->name, ev->status);
2983 }
2984 
2985 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2986 {
2987 	struct inquiry_data data;
2988 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
2989 	int num_rsp = *((__u8 *) skb->data);
2990 
2991 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2992 
2993 	if (!num_rsp)
2994 		return;
2995 
2996 	hci_dev_lock(hdev);
2997 
2998 	for (; num_rsp; num_rsp--, info++) {
2999 		bool name_known, ssp;
3000 
3001 		bacpy(&data.bdaddr, &info->bdaddr);
3002 		data.pscan_rep_mode	= info->pscan_rep_mode;
3003 		data.pscan_period_mode	= info->pscan_period_mode;
3004 		data.pscan_mode		= 0x00;
3005 		memcpy(data.dev_class, info->dev_class, 3);
3006 		data.clock_offset	= info->clock_offset;
3007 		data.rssi		= info->rssi;
3008 		data.ssp_mode		= 0x01;
3009 
3010 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
3011 			name_known = eir_has_data_type(info->data,
3012 						       sizeof(info->data),
3013 						       EIR_NAME_COMPLETE);
3014 		else
3015 			name_known = true;
3016 
3017 		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3018 						      &ssp);
3019 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3020 				  info->dev_class, info->rssi, !name_known,
3021 				  ssp, info->data, sizeof(info->data));
3022 	}
3023 
3024 	hci_dev_unlock(hdev);
3025 }
3026 
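/* Derive the authentication requirements to advertise in the IO Capability
 * reply.  The values follow the Bluetooth SSP encoding (0x00/0x01 no bonding,
 * 0x02/0x03 dedicated bonding, odd values requiring MITM protection). */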
3027 static inline u8 hci_get_auth_req(struct hci_conn *conn)
3028 {
3029 	/* If remote requests dedicated bonding follow that lead */
3030 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3031 		/* If both remote and local IO capabilities allow MITM
3032 		 * protection then require it, otherwise don't */
3033 		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3034 			return 0x02;
3035 		else
3036 			return 0x03;
3037 	}
3038 
3039 	/* If remote requests no-bonding follow that lead */
3040 	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3041 		return conn->remote_auth | (conn->auth_type & 0x01);
3042 
3043 	return conn->auth_type;
3044 }
3045 
3046 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3047 {
3048 	struct hci_ev_io_capa_request *ev = (void *) skb->data;
3049 	struct hci_conn *conn;
3050 
3051 	BT_DBG("%s", hdev->name);
3052 
3053 	hci_dev_lock(hdev);
3054 
3055 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3056 	if (!conn)
3057 		goto unlock;
3058 
3059 	hci_conn_hold(conn);
3060 
3061 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3062 		goto unlock;
3063 
3064 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3065 			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3066 		struct hci_cp_io_capability_reply cp;
3067 
3068 		bacpy(&cp.bdaddr, &ev->bdaddr);
3069 		/* Change the IO capability from KeyboardDisplay to
3070 		 * DisplayYesNo, since the BT spec does not allow KeyboardDisplay here. */
3071 		cp.capability = (conn->io_capability == 0x04) ?
3072 						0x01 : conn->io_capability;
3073 		conn->auth_type = hci_get_auth_req(conn);
3074 		cp.authentication = conn->auth_type;
3075 
3076 		if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
3077 				hci_find_remote_oob_data(hdev, &conn->dst))
3078 			cp.oob_data = 0x01;
3079 		else
3080 			cp.oob_data = 0x00;
3081 
3082 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3083 							sizeof(cp), &cp);
3084 	} else {
3085 		struct hci_cp_io_capability_neg_reply cp;
3086 
3087 		bacpy(&cp.bdaddr, &ev->bdaddr);
3088 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3089 
3090 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3091 							sizeof(cp), &cp);
3092 	}
3093 
3094 unlock:
3095 	hci_dev_unlock(hdev);
3096 }
3097 
3098 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3099 {
3100 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3101 	struct hci_conn *conn;
3102 
3103 	BT_DBG("%s", hdev->name);
3104 
3105 	hci_dev_lock(hdev);
3106 
3107 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3108 	if (!conn)
3109 		goto unlock;
3110 
3111 	conn->remote_cap = ev->capability;
3112 	conn->remote_auth = ev->authentication;
3113 	if (ev->oob_data)
3114 		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3115 
3116 unlock:
3117 	hci_dev_unlock(hdev);
3118 }
3119 
3120 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3121 							struct sk_buff *skb)
3122 {
3123 	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3124 	int loc_mitm, rem_mitm, confirm_hint = 0;
3125 	struct hci_conn *conn;
3126 
3127 	BT_DBG("%s", hdev->name);
3128 
3129 	hci_dev_lock(hdev);
3130 
3131 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3132 		goto unlock;
3133 
3134 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3135 	if (!conn)
3136 		goto unlock;
3137 
3138 	loc_mitm = (conn->auth_type & 0x01);
3139 	rem_mitm = (conn->remote_auth & 0x01);
3140 
3141 	/* If we require MITM but the remote device can't provide that
3142 	 * (it has NoInputNoOutput) then reject the confirmation
3143 	 * request. The only exception is when we're dedicated bonding
3144 	 * initiators (connect_cfm_cb set) since then we always have the MITM
3145 	 * bit set. */
3146 	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3147 		BT_DBG("Rejecting request: remote device can't provide MITM");
3148 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3149 					sizeof(ev->bdaddr), &ev->bdaddr);
3150 		goto unlock;
3151 	}
3152 
3153 	/* If neither side requires MITM protection, auto-accept */
3154 	if ((!loc_mitm || conn->remote_cap == 0x03) &&
3155 				(!rem_mitm || conn->io_capability == 0x03)) {
3156 
3157 		/* If we're not the initiator, request authorization to
3158 		 * proceed from user space (mgmt_user_confirm with
3159 		 * confirm_hint set to 1). The exception is if neither
3160 		 * side requires MITM, in which case we auto-accept.
3161 		 */
3162 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
3163 		    (loc_mitm || rem_mitm)) {
3164 			BT_DBG("Confirming auto-accept as acceptor");
3165 			confirm_hint = 1;
3166 			goto confirm;
3167 		}
3168 
3169 		BT_DBG("Auto-accept of user confirmation with %ums delay",
3170 						hdev->auto_accept_delay);
3171 
3172 		if (hdev->auto_accept_delay > 0) {
3173 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3174 			mod_timer(&conn->auto_accept_timer, jiffies + delay);
3175 			goto unlock;
3176 		}
3177 
3178 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3179 						sizeof(ev->bdaddr), &ev->bdaddr);
3180 		goto unlock;
3181 	}
3182 
3183 confirm:
3184 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3185 				  confirm_hint);
3186 
3187 unlock:
3188 	hci_dev_unlock(hdev);
3189 }
3190 
3191 static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3192 							struct sk_buff *skb)
3193 {
3194 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3195 
3196 	BT_DBG("%s", hdev->name);
3197 
3198 	hci_dev_lock(hdev);
3199 
3200 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3201 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3202 
3203 	hci_dev_unlock(hdev);
3204 }
3205 
3206 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3207 {
3208 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3209 	struct hci_conn *conn;
3210 
3211 	BT_DBG("%s", hdev->name);
3212 
3213 	hci_dev_lock(hdev);
3214 
3215 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3216 	if (!conn)
3217 		goto unlock;
3218 
3219 	/* To avoid duplicate auth_failed events to user space we check
3220 	 * the HCI_CONN_AUTH_PEND flag, which will be set if we
3221 	 * initiated the authentication. A traditional auth_complete
3222 	 * event is always produced when we are the initiator and is
3223 	 * also mapped to the mgmt_auth_failed event */
3224 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3225 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3226 				 ev->status);
3227 
3228 	hci_conn_put(conn);
3229 
3230 unlock:
3231 	hci_dev_unlock(hdev);
3232 }
3233 
3234 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3235 {
3236 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3237 	struct inquiry_entry *ie;
3238 
3239 	BT_DBG("%s", hdev->name);
3240 
3241 	hci_dev_lock(hdev);
3242 
3243 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3244 	if (ie)
3245 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3246 
3247 	hci_dev_unlock(hdev);
3248 }
3249 
3250 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3251 						   struct sk_buff *skb)
3252 {
3253 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3254 	struct oob_data *data;
3255 
3256 	BT_DBG("%s", hdev->name);
3257 
3258 	hci_dev_lock(hdev);
3259 
3260 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3261 		goto unlock;
3262 
3263 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3264 	if (data) {
3265 		struct hci_cp_remote_oob_data_reply cp;
3266 
3267 		bacpy(&cp.bdaddr, &ev->bdaddr);
3268 		memcpy(cp.hash, data->hash, sizeof(cp.hash));
3269 		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3270 
3271 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3272 									&cp);
3273 	} else {
3274 		struct hci_cp_remote_oob_data_neg_reply cp;
3275 
3276 		bacpy(&cp.bdaddr, &ev->bdaddr);
3277 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3278 									&cp);
3279 	}
3280 
3281 unlock:
3282 	hci_dev_unlock(hdev);
3283 }
3284 
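/* LE Connection Complete event: create the hci_conn on demand for incoming
 * connections, report success or failure to the management interface and
 * finish the connection setup. */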
3285 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3286 {
3287 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3288 	struct hci_conn *conn;
3289 
3290 	BT_DBG("%s status %d", hdev->name, ev->status);
3291 
3292 	hci_dev_lock(hdev);
3293 
3294 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3295 	if (!conn) {
3296 		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3297 		if (!conn) {
3298 			BT_ERR("No memory for new connection");
3299 			hci_dev_unlock(hdev);
3300 			return;
3301 		}
3302 
3303 		conn->dst_type = ev->bdaddr_type;
3304 	}
3305 
3306 	if (ev->status) {
3307 		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3308 						conn->dst_type, ev->status);
3309 		hci_proto_connect_cfm(conn, ev->status);
3310 		conn->state = BT_CLOSED;
3311 		hci_conn_del(conn);
3312 		goto unlock;
3313 	}
3314 
3315 	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3316 		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3317 				      conn->dst_type, 0, NULL, 0, NULL);
3318 
3319 	conn->sec_level = BT_SECURITY_LOW;
3320 	conn->handle = __le16_to_cpu(ev->handle);
3321 	conn->state = BT_CONNECTED;
3322 
3323 	hci_conn_hold_device(conn);
3324 	hci_conn_add_sysfs(conn);
3325 
3326 	hci_proto_connect_cfm(conn, ev->status);
3327 
3328 unlock:
3329 	hci_dev_unlock(hdev);
3330 }
3331 
3332 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3333 						struct sk_buff *skb)
3334 {
3335 	u8 num_reports = skb->data[0];
3336 	void *ptr = &skb->data[1];
3337 	s8 rssi;
3338 
3339 	hci_dev_lock(hdev);
3340 
3341 	while (num_reports--) {
3342 		struct hci_ev_le_advertising_info *ev = ptr;
3343 
3344 		hci_add_adv_entry(hdev, ev);
3345 
3346 		rssi = ev->data[ev->length];
3347 		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3348 				  NULL, rssi, 0, 1, ev->data, ev->length);
3349 
3350 		ptr += sizeof(*ev) + ev->length + 1;
3351 	}
3352 
3353 	hci_dev_unlock(hdev);
3354 }
3355 
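/* LE Long Term Key Request event: reply with a stored LTK matching the
 * EDiv/Rand values (raising the pending security level for authenticated
 * keys), or send a negative reply if no key is known. */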
3356 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3357 						struct sk_buff *skb)
3358 {
3359 	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3360 	struct hci_cp_le_ltk_reply cp;
3361 	struct hci_cp_le_ltk_neg_reply neg;
3362 	struct hci_conn *conn;
3363 	struct smp_ltk *ltk;
3364 
3365 	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
3366 
3367 	hci_dev_lock(hdev);
3368 
3369 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3370 	if (conn == NULL)
3371 		goto not_found;
3372 
3373 	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3374 	if (ltk == NULL)
3375 		goto not_found;
3376 
3377 	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3378 	cp.handle = cpu_to_le16(conn->handle);
3379 
3380 	if (ltk->authenticated)
3381 		conn->pending_sec_level = BT_SECURITY_HIGH;
3382 	else
3383 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
3384 
3385 	conn->enc_key_size = ltk->enc_size;
3386 
3387 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3388 
3389 	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
3390 	 * temporary key used to encrypt a connection following
3391 	 * pairing. It is used during the Encrypted Session Setup to
3392 	 * distribute the keys. Later, security can be re-established
3393 	 * using a distributed LTK.
3394 	 */
3395 	if (ltk->type == HCI_SMP_STK_SLAVE) {
3396 		list_del(&ltk->list);
3397 		kfree(ltk);
3398 	}
3399 
3400 	hci_dev_unlock(hdev);
3401 
3402 	return;
3403 
3404 not_found:
3405 	neg.handle = ev->handle;
3406 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3407 	hci_dev_unlock(hdev);
3408 }
3409 
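/* LE Meta event: strip the meta header and demultiplex on the subevent
 * code. */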
3410 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3411 {
3412 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
3413 
3414 	skb_pull(skb, sizeof(*le_ev));
3415 
3416 	switch (le_ev->subevent) {
3417 	case HCI_EV_LE_CONN_COMPLETE:
3418 		hci_le_conn_complete_evt(hdev, skb);
3419 		break;
3420 
3421 	case HCI_EV_LE_ADVERTISING_REPORT:
3422 		hci_le_adv_report_evt(hdev, skb);
3423 		break;
3424 
3425 	case HCI_EV_LE_LTK_REQ:
3426 		hci_le_ltk_request_evt(hdev, skb);
3427 		break;
3428 
3429 	default:
3430 		break;
3431 	}
3432 }
3433 
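/* Main HCI event demultiplexer: called for every event packet received from
 * the controller; strips the event header, dispatches to the handler for the
 * event code and frees the skb. */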
3434 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3435 {
3436 	struct hci_event_hdr *hdr = (void *) skb->data;
3437 	__u8 event = hdr->evt;
3438 
3439 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
3440 
3441 	switch (event) {
3442 	case HCI_EV_INQUIRY_COMPLETE:
3443 		hci_inquiry_complete_evt(hdev, skb);
3444 		break;
3445 
3446 	case HCI_EV_INQUIRY_RESULT:
3447 		hci_inquiry_result_evt(hdev, skb);
3448 		break;
3449 
3450 	case HCI_EV_CONN_COMPLETE:
3451 		hci_conn_complete_evt(hdev, skb);
3452 		break;
3453 
3454 	case HCI_EV_CONN_REQUEST:
3455 		hci_conn_request_evt(hdev, skb);
3456 		break;
3457 
3458 	case HCI_EV_DISCONN_COMPLETE:
3459 		hci_disconn_complete_evt(hdev, skb);
3460 		break;
3461 
3462 	case HCI_EV_AUTH_COMPLETE:
3463 		hci_auth_complete_evt(hdev, skb);
3464 		break;
3465 
3466 	case HCI_EV_REMOTE_NAME:
3467 		hci_remote_name_evt(hdev, skb);
3468 		break;
3469 
3470 	case HCI_EV_ENCRYPT_CHANGE:
3471 		hci_encrypt_change_evt(hdev, skb);
3472 		break;
3473 
3474 	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3475 		hci_change_link_key_complete_evt(hdev, skb);
3476 		break;
3477 
3478 	case HCI_EV_REMOTE_FEATURES:
3479 		hci_remote_features_evt(hdev, skb);
3480 		break;
3481 
3482 	case HCI_EV_REMOTE_VERSION:
3483 		hci_remote_version_evt(hdev, skb);
3484 		break;
3485 
3486 	case HCI_EV_QOS_SETUP_COMPLETE:
3487 		hci_qos_setup_complete_evt(hdev, skb);
3488 		break;
3489 
3490 	case HCI_EV_CMD_COMPLETE:
3491 		hci_cmd_complete_evt(hdev, skb);
3492 		break;
3493 
3494 	case HCI_EV_CMD_STATUS:
3495 		hci_cmd_status_evt(hdev, skb);
3496 		break;
3497 
3498 	case HCI_EV_ROLE_CHANGE:
3499 		hci_role_change_evt(hdev, skb);
3500 		break;
3501 
3502 	case HCI_EV_NUM_COMP_PKTS:
3503 		hci_num_comp_pkts_evt(hdev, skb);
3504 		break;
3505 
3506 	case HCI_EV_MODE_CHANGE:
3507 		hci_mode_change_evt(hdev, skb);
3508 		break;
3509 
3510 	case HCI_EV_PIN_CODE_REQ:
3511 		hci_pin_code_request_evt(hdev, skb);
3512 		break;
3513 
3514 	case HCI_EV_LINK_KEY_REQ:
3515 		hci_link_key_request_evt(hdev, skb);
3516 		break;
3517 
3518 	case HCI_EV_LINK_KEY_NOTIFY:
3519 		hci_link_key_notify_evt(hdev, skb);
3520 		break;
3521 
3522 	case HCI_EV_CLOCK_OFFSET:
3523 		hci_clock_offset_evt(hdev, skb);
3524 		break;
3525 
3526 	case HCI_EV_PKT_TYPE_CHANGE:
3527 		hci_pkt_type_change_evt(hdev, skb);
3528 		break;
3529 
3530 	case HCI_EV_PSCAN_REP_MODE:
3531 		hci_pscan_rep_mode_evt(hdev, skb);
3532 		break;
3533 
3534 	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3535 		hci_inquiry_result_with_rssi_evt(hdev, skb);
3536 		break;
3537 
3538 	case HCI_EV_REMOTE_EXT_FEATURES:
3539 		hci_remote_ext_features_evt(hdev, skb);
3540 		break;
3541 
3542 	case HCI_EV_SYNC_CONN_COMPLETE:
3543 		hci_sync_conn_complete_evt(hdev, skb);
3544 		break;
3545 
3546 	case HCI_EV_SYNC_CONN_CHANGED:
3547 		hci_sync_conn_changed_evt(hdev, skb);
3548 		break;
3549 
3550 	case HCI_EV_SNIFF_SUBRATE:
3551 		hci_sniff_subrate_evt(hdev, skb);
3552 		break;
3553 
3554 	case HCI_EV_EXTENDED_INQUIRY_RESULT:
3555 		hci_extended_inquiry_result_evt(hdev, skb);
3556 		break;
3557 
3558 	case HCI_EV_IO_CAPA_REQUEST:
3559 		hci_io_capa_request_evt(hdev, skb);
3560 		break;
3561 
3562 	case HCI_EV_IO_CAPA_REPLY:
3563 		hci_io_capa_reply_evt(hdev, skb);
3564 		break;
3565 
3566 	case HCI_EV_USER_CONFIRM_REQUEST:
3567 		hci_user_confirm_request_evt(hdev, skb);
3568 		break;
3569 
3570 	case HCI_EV_USER_PASSKEY_REQUEST:
3571 		hci_user_passkey_request_evt(hdev, skb);
3572 		break;
3573 
3574 	case HCI_EV_SIMPLE_PAIR_COMPLETE:
3575 		hci_simple_pair_complete_evt(hdev, skb);
3576 		break;
3577 
3578 	case HCI_EV_REMOTE_HOST_FEATURES:
3579 		hci_remote_host_features_evt(hdev, skb);
3580 		break;
3581 
3582 	case HCI_EV_LE_META:
3583 		hci_le_meta_evt(hdev, skb);
3584 		break;
3585 
3586 	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3587 		hci_remote_oob_data_request_evt(hdev, skb);
3588 		break;
3589 
3590 	case HCI_EV_NUM_COMP_BLOCKS:
3591 		hci_num_comp_blocks_evt(hdev, skb);
3592 		break;
3593 
3594 	default:
3595 		BT_DBG("%s event 0x%x", hdev->name, event);
3596 		break;
3597 	}
3598 
3599 	kfree_skb(skb);
3600 	hdev->stat.evt_rx++;
3601 }
3602