1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <linux/notifier.h>
39 #include <net/sock.h>
40
41 #include <asm/system.h>
42 #include <linux/uaccess.h>
43 #include <asm/unaligned.h>
44
45 #include <net/bluetooth/bluetooth.h>
46 #include <net/bluetooth/hci_core.h>
47
48 /* Handle HCI Event packets */
49
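/* Naming convention for the handlers below: hci_cc_* process Command
 * Complete events, hci_cs_* process Command Status events, and the
 * hci_*_evt functions handle the remaining HCI events. */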
50 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
51 {
52 __u8 status = *((__u8 *) skb->data);
53
54 BT_DBG("%s status 0x%x", hdev->name, status);
55
56 if (status)
57 return;
58
59 clear_bit(HCI_INQUIRY, &hdev->flags);
60
61 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
62
63 hci_conn_check_pending(hdev);
64 }
65
66 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
67 {
68 __u8 status = *((__u8 *) skb->data);
69
70 BT_DBG("%s status 0x%x", hdev->name, status);
71
72 if (status)
73 return;
74
75 clear_bit(HCI_INQUIRY, &hdev->flags);
76
77 hci_conn_check_pending(hdev);
78 }
79
80 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
81 {
82 BT_DBG("%s", hdev->name);
83 }
84
85 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
86 {
87 struct hci_rp_role_discovery *rp = (void *) skb->data;
88 struct hci_conn *conn;
89
90 BT_DBG("%s status 0x%x", hdev->name, rp->status);
91
92 if (rp->status)
93 return;
94
95 hci_dev_lock(hdev);
96
97 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
98 if (conn) {
99 if (rp->role)
100 conn->link_mode &= ~HCI_LM_MASTER;
101 else
102 conn->link_mode |= HCI_LM_MASTER;
103 }
104
105 hci_dev_unlock(hdev);
106 }
107
108 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
109 {
110 struct hci_rp_read_link_policy *rp = (void *) skb->data;
111 struct hci_conn *conn;
112
113 BT_DBG("%s status 0x%x", hdev->name, rp->status);
114
115 if (rp->status)
116 return;
117
118 hci_dev_lock(hdev);
119
120 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
121 if (conn)
122 conn->link_policy = __le16_to_cpu(rp->policy);
123
124 hci_dev_unlock(hdev);
125 }
126
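/* The new link policy is not echoed back in the Command Complete reply,
 * so it is read from the command that was sent: the 2-byte policy follows
 * the 2-byte connection handle in the command parameters. */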
127 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
128 {
129 struct hci_rp_write_link_policy *rp = (void *) skb->data;
130 struct hci_conn *conn;
131 void *sent;
132
133 BT_DBG("%s status 0x%x", hdev->name, rp->status);
134
135 if (rp->status)
136 return;
137
138 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
139 if (!sent)
140 return;
141
142 hci_dev_lock(hdev);
143
144 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
145 if (conn)
146 conn->link_policy = get_unaligned_le16(sent + 2);
147
148 hci_dev_unlock(hdev);
149 }
150
151 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
152 {
153 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
154
155 BT_DBG("%s status 0x%x", hdev->name, rp->status);
156
157 if (rp->status)
158 return;
159
160 hdev->link_policy = __le16_to_cpu(rp->policy);
161 }
162
163 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
164 {
165 __u8 status = *((__u8 *) skb->data);
166 void *sent;
167
168 BT_DBG("%s status 0x%x", hdev->name, status);
169
170 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
171 if (!sent)
172 return;
173
174 if (!status)
175 hdev->link_policy = get_unaligned_le16(sent);
176
177 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
178 }
179
180 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
181 {
182 __u8 status = *((__u8 *) skb->data);
183
184 BT_DBG("%s status 0x%x", hdev->name, status);
185
186 clear_bit(HCI_RESET, &hdev->flags);
187
188 hci_req_complete(hdev, HCI_OP_RESET, status);
189 }
190
191 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
192 {
193 __u8 status = *((__u8 *) skb->data);
194 void *sent;
195
196 BT_DBG("%s status 0x%x", hdev->name, status);
197
198 if (status)
199 return;
200
201 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
202 if (!sent)
203 return;
204
205 memcpy(hdev->dev_name, sent, 248);
206 }
207
208 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
209 {
210 struct hci_rp_read_local_name *rp = (void *) skb->data;
211
212 BT_DBG("%s status 0x%x", hdev->name, rp->status);
213
214 if (rp->status)
215 return;
216
217 memcpy(hdev->dev_name, rp->name, 248);
218 }
219
220 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
221 {
222 __u8 status = *((__u8 *) skb->data);
223 void *sent;
224
225 BT_DBG("%s status 0x%x", hdev->name, status);
226
227 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
228 if (!sent)
229 return;
230
231 if (!status) {
232 __u8 param = *((__u8 *) sent);
233
234 if (param == AUTH_ENABLED)
235 set_bit(HCI_AUTH, &hdev->flags);
236 else
237 clear_bit(HCI_AUTH, &hdev->flags);
238 }
239
240 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
241 }
242
243 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
244 {
245 __u8 status = *((__u8 *) skb->data);
246 void *sent;
247
248 BT_DBG("%s status 0x%x", hdev->name, status);
249
250 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
251 if (!sent)
252 return;
253
254 if (!status) {
255 __u8 param = *((__u8 *) sent);
256
257 if (param)
258 set_bit(HCI_ENCRYPT, &hdev->flags);
259 else
260 clear_bit(HCI_ENCRYPT, &hdev->flags);
261 }
262
263 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
264 }
265
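/* Write Scan Enable completed: update the ISCAN/PSCAN flags and report
 * discoverable/connectable changes to the management interface. */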
266 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
267 {
268 __u8 status = *((__u8 *) skb->data);
269 void *sent;
270
271 BT_DBG("%s status 0x%x", hdev->name, status);
272
273 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
274 if (!sent)
275 return;
276
277 if (!status) {
278 __u8 param = *((__u8 *) sent);
279 int old_pscan, old_iscan;
280
281 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
282 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
283
284 if (param & SCAN_INQUIRY) {
285 set_bit(HCI_ISCAN, &hdev->flags);
286 if (!old_iscan)
287 mgmt_discoverable(hdev->id, 1);
288 } else if (old_iscan)
289 mgmt_discoverable(hdev->id, 0);
290
291 if (param & SCAN_PAGE) {
292 set_bit(HCI_PSCAN, &hdev->flags);
293 if (!old_pscan)
294 mgmt_connectable(hdev->id, 1);
295 } else if (old_pscan)
296 mgmt_connectable(hdev->id, 0);
297 }
298
299 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
300 }
301
302 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
303 {
304 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
305
306 BT_DBG("%s status 0x%x", hdev->name, rp->status);
307
308 if (rp->status)
309 return;
310
311 memcpy(hdev->dev_class, rp->dev_class, 3);
312
313 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
314 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
315 }
316
317 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
318 {
319 __u8 status = *((__u8 *) skb->data);
320 void *sent;
321
322 BT_DBG("%s status 0x%x", hdev->name, status);
323
324 if (status)
325 return;
326
327 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
328 if (!sent)
329 return;
330
331 memcpy(hdev->dev_class, sent, 3);
332 }
333
334 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
335 {
336 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
337 __u16 setting;
338
339 BT_DBG("%s status 0x%x", hdev->name, rp->status);
340
341 if (rp->status)
342 return;
343
344 setting = __le16_to_cpu(rp->voice_setting);
345
346 if (hdev->voice_setting == setting)
347 return;
348
349 hdev->voice_setting = setting;
350
351 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
352
353 if (hdev->notify) {
354 tasklet_disable(&hdev->tx_task);
355 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
356 tasklet_enable(&hdev->tx_task);
357 }
358 }
359
360 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
361 {
362 __u8 status = *((__u8 *) skb->data);
363 __u16 setting;
364 void *sent;
365
366 BT_DBG("%s status 0x%x", hdev->name, status);
367
368 if (status)
369 return;
370
371 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
372 if (!sent)
373 return;
374
375 setting = get_unaligned_le16(sent);
376
377 if (hdev->voice_setting == setting)
378 return;
379
380 hdev->voice_setting = setting;
381
382 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
383
384 if (hdev->notify) {
385 tasklet_disable(&hdev->tx_task);
386 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
387 tasklet_enable(&hdev->tx_task);
388 }
389 }
390
391 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
392 {
393 __u8 status = *((__u8 *) skb->data);
394
395 BT_DBG("%s status 0x%x", hdev->name, status);
396
397 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
398 }
399
400 static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
401 {
402 struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
403
404 BT_DBG("%s status 0x%x", hdev->name, rp->status);
405
406 if (rp->status)
407 return;
408
409 hdev->ssp_mode = rp->mode;
410 }
411
412 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
413 {
414 __u8 status = *((__u8 *) skb->data);
415 void *sent;
416
417 BT_DBG("%s status 0x%x", hdev->name, status);
418
419 if (status)
420 return;
421
422 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
423 if (!sent)
424 return;
425
426 hdev->ssp_mode = *((__u8 *) sent);
427 }
428
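/* Pick the inquiry result format to request from the controller:
 * 2 = Inquiry Result with RSSI or Extended Inquiry Result, 1 = Inquiry
 * Result with RSSI, 0 = standard format.  A few controllers support RSSI
 * results without advertising the feature bit, so they are matched by
 * manufacturer/revision below. */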
429 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
430 {
431 if (hdev->features[6] & LMP_EXT_INQ)
432 return 2;
433
434 if (hdev->features[3] & LMP_RSSI_INQ)
435 return 1;
436
437 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
438 hdev->lmp_subver == 0x0757)
439 return 1;
440
441 if (hdev->manufacturer == 15) {
442 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
443 return 1;
444 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
445 return 1;
446 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
447 return 1;
448 }
449
450 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
451 hdev->lmp_subver == 0x1805)
452 return 1;
453
454 return 0;
455 }
456
457 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
458 {
459 u8 mode;
460
461 mode = hci_get_inquiry_mode(hdev);
462
463 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
464 }
465
466 static void hci_setup_event_mask(struct hci_dev *hdev)
467 {
468 /* The second byte is 0xff instead of 0x9f (two reserved bits
469 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
470 * command otherwise */
471 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
472
473 /* Events for 1.2 and newer controllers */
474 if (hdev->lmp_ver > 1) {
475 events[4] |= 0x01; /* Flow Specification Complete */
476 events[4] |= 0x02; /* Inquiry Result with RSSI */
477 events[4] |= 0x04; /* Read Remote Extended Features Complete */
478 events[5] |= 0x08; /* Synchronous Connection Complete */
479 events[5] |= 0x10; /* Synchronous Connection Changed */
480 }
481
482 if (hdev->features[3] & LMP_RSSI_INQ)
483 events[4] |= 0x04; /* Inquiry Result with RSSI */
484
485 if (hdev->features[5] & LMP_SNIFF_SUBR)
486 events[5] |= 0x20; /* Sniff Subrating */
487
488 if (hdev->features[5] & LMP_PAUSE_ENC)
489 events[5] |= 0x80; /* Encryption Key Refresh Complete */
490
491 if (hdev->features[6] & LMP_EXT_INQ)
492 events[5] |= 0x40; /* Extended Inquiry Result */
493
494 if (hdev->features[6] & LMP_NO_FLUSH)
495 events[7] |= 0x01; /* Enhanced Flush Complete */
496
497 if (hdev->features[7] & LMP_LSTO)
498 events[6] |= 0x80; /* Link Supervision Timeout Changed */
499
500 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
501 events[6] |= 0x01; /* IO Capability Request */
502 events[6] |= 0x02; /* IO Capability Response */
503 events[6] |= 0x04; /* User Confirmation Request */
504 events[6] |= 0x08; /* User Passkey Request */
505 events[6] |= 0x10; /* Remote OOB Data Request */
506 events[6] |= 0x20; /* Simple Pairing Complete */
507 events[7] |= 0x04; /* User Passkey Notification */
508 events[7] |= 0x08; /* Keypress Notification */
509 events[7] |= 0x10; /* Remote Host Supported
510 * Features Notification */
511 }
512
513 if (hdev->features[4] & LMP_LE)
514 events[7] |= 0x20; /* LE Meta-Event */
515
516 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
517 }
518
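/* Controller initialisation, run from hci_cc_read_local_version() while
 * HCI_INIT is set: program the event mask, read the supported commands,
 * enable Simple Pairing when available and configure the inquiry mode and
 * inquiry response TX power reporting. */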
519 static void hci_setup(struct hci_dev *hdev)
520 {
521 hci_setup_event_mask(hdev);
522
523 if (hdev->lmp_ver > 1)
524 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
525
526 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
527 u8 mode = 0x01;
528 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
529 }
530
531 if (hdev->features[3] & LMP_RSSI_INQ)
532 hci_setup_inquiry_mode(hdev);
533
534 if (hdev->features[7] & LMP_INQ_TX_PWR)
535 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
536 }
537
538 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
539 {
540 struct hci_rp_read_local_version *rp = (void *) skb->data;
541
542 BT_DBG("%s status 0x%x", hdev->name, rp->status);
543
544 if (rp->status)
545 return;
546
547 hdev->hci_ver = rp->hci_ver;
548 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
549 hdev->lmp_ver = rp->lmp_ver;
550 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
551 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
552
553 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
554 hdev->manufacturer,
555 hdev->hci_ver, hdev->hci_rev);
556
557 if (test_bit(HCI_INIT, &hdev->flags))
558 hci_setup(hdev);
559 }
560
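/* Derive the default link policy from the local LMP features and write
 * it to the controller. */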
561 static void hci_setup_link_policy(struct hci_dev *hdev)
562 {
563 u16 link_policy = 0;
564
565 if (hdev->features[0] & LMP_RSWITCH)
566 link_policy |= HCI_LP_RSWITCH;
567 if (hdev->features[0] & LMP_HOLD)
568 link_policy |= HCI_LP_HOLD;
569 if (hdev->features[0] & LMP_SNIFF)
570 link_policy |= HCI_LP_SNIFF;
571 if (hdev->features[1] & LMP_PARK)
572 link_policy |= HCI_LP_PARK;
573
574 link_policy = cpu_to_le16(link_policy);
575 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
576 sizeof(link_policy), &link_policy);
577 }
578
579 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
580 {
581 struct hci_rp_read_local_commands *rp = (void *) skb->data;
582
583 BT_DBG("%s status 0x%x", hdev->name, rp->status);
584
585 if (rp->status)
586 goto done;
587
588 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
589
590 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
591 hci_setup_link_policy(hdev);
592
593 done:
594 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
595 }
596
597 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
598 {
599 struct hci_rp_read_local_features *rp = (void *) skb->data;
600
601 BT_DBG("%s status 0x%x", hdev->name, rp->status);
602
603 if (rp->status)
604 return;
605
606 memcpy(hdev->features, rp->features, 8);
607
608 /* Adjust default settings according to features
609 * supported by device. */
610
611 if (hdev->features[0] & LMP_3SLOT)
612 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
613
614 if (hdev->features[0] & LMP_5SLOT)
615 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
616
617 if (hdev->features[1] & LMP_HV2) {
618 hdev->pkt_type |= (HCI_HV2);
619 hdev->esco_type |= (ESCO_HV2);
620 }
621
622 if (hdev->features[1] & LMP_HV3) {
623 hdev->pkt_type |= (HCI_HV3);
624 hdev->esco_type |= (ESCO_HV3);
625 }
626
627 if (hdev->features[3] & LMP_ESCO)
628 hdev->esco_type |= (ESCO_EV3);
629
630 if (hdev->features[4] & LMP_EV4)
631 hdev->esco_type |= (ESCO_EV4);
632
633 if (hdev->features[4] & LMP_EV5)
634 hdev->esco_type |= (ESCO_EV5);
635
636 if (hdev->features[5] & LMP_EDR_ESCO_2M)
637 hdev->esco_type |= (ESCO_2EV3);
638
639 if (hdev->features[5] & LMP_EDR_ESCO_3M)
640 hdev->esco_type |= (ESCO_3EV3);
641
642 if (hdev->features[5] & LMP_EDR_3S_ESCO)
643 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
644
645 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
646 hdev->features[0], hdev->features[1],
647 hdev->features[2], hdev->features[3],
648 hdev->features[4], hdev->features[5],
649 hdev->features[6], hdev->features[7]);
650 }
651
652 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
653 {
654 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
655
656 BT_DBG("%s status 0x%x", hdev->name, rp->status);
657
658 if (rp->status)
659 return;
660
661 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
662 hdev->sco_mtu = rp->sco_mtu;
663 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
664 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
665
666 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
667 hdev->sco_mtu = 64;
668 hdev->sco_pkts = 8;
669 }
670
671 hdev->acl_cnt = hdev->acl_pkts;
672 hdev->sco_cnt = hdev->sco_pkts;
673
674 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
675 hdev->acl_mtu, hdev->acl_pkts,
676 hdev->sco_mtu, hdev->sco_pkts);
677 }
678
679 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
680 {
681 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
682
683 BT_DBG("%s status 0x%x", hdev->name, rp->status);
684
685 if (!rp->status)
686 bacpy(&hdev->bdaddr, &rp->bdaddr);
687
688 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
689 }
690
691 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
692 {
693 __u8 status = *((__u8 *) skb->data);
694
695 BT_DBG("%s status 0x%x", hdev->name, status);
696
697 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
698 }
699
700 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
701 struct sk_buff *skb)
702 {
703 __u8 status = *((__u8 *) skb->data);
704
705 BT_DBG("%s status 0x%x", hdev->name, status);
706
707 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
708 }
709
710 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
711 {
712 __u8 status = *((__u8 *) skb->data);
713
714 BT_DBG("%s status 0x%x", hdev->name, status);
715
716 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
717 }
718
719 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
720 struct sk_buff *skb)
721 {
722 __u8 status = *((__u8 *) skb->data);
723
724 BT_DBG("%s status 0x%x", hdev->name, status);
725
726 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
727 }
728
729 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
730 struct sk_buff *skb)
731 {
732 __u8 status = *((__u8 *) skb->data);
733
734 BT_DBG("%s status 0x%x", hdev->name, status);
735
736 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
737 }
738
739 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
740 {
741 __u8 status = *((__u8 *) skb->data);
742
743 BT_DBG("%s status 0x%x", hdev->name, status);
744
745 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
746 }
747
748 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
749 {
750 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
751 struct hci_cp_pin_code_reply *cp;
752 struct hci_conn *conn;
753
754 BT_DBG("%s status 0x%x", hdev->name, rp->status);
755
756 if (test_bit(HCI_MGMT, &hdev->flags))
757 mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
758
759 if (rp->status != 0)
760 return;
761
762 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
763 if (!cp)
764 return;
765
766 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
767 if (conn)
768 conn->pin_length = cp->pin_len;
769 }
770
771 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
772 {
773 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
774
775 BT_DBG("%s status 0x%x", hdev->name, rp->status);
776
777 if (test_bit(HCI_MGMT, &hdev->flags))
778 mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
779 rp->status);
780 }

781 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
782 struct sk_buff *skb)
783 {
784 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
785
786 BT_DBG("%s status 0x%x", hdev->name, rp->status);
787
788 if (rp->status)
789 return;
790
791 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
792 hdev->le_pkts = rp->le_max_pkt;
793
794 hdev->le_cnt = hdev->le_pkts;
795
796 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
797
798 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
799 }
800
801 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
802 {
803 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
804
805 BT_DBG("%s status 0x%x", hdev->name, rp->status);
806
807 if (test_bit(HCI_MGMT, &hdev->flags))
808 mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
809 rp->status);
810 }
811
812 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
813 struct sk_buff *skb)
814 {
815 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
816
817 BT_DBG("%s status 0x%x", hdev->name, rp->status);
818
819 if (test_bit(HCI_MGMT, &hdev->flags))
820 mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
821 rp->status);
822 }
823
824 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
825 {
826 BT_DBG("%s status 0x%x", hdev->name, status);
827
828 if (status) {
829 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
830
831 hci_conn_check_pending(hdev);
832 } else
833 set_bit(HCI_INQUIRY, &hdev->flags);
834 }
835
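/* Command Status for Create Connection: on failure the pending connection
 * is torn down, except for status 0x0c (Command Disallowed) on the first
 * attempts, where it is parked in BT_CONNECT2 for a retry.  On success a
 * connection object is created for the outgoing ACL link if none exists. */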
836 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
837 {
838 struct hci_cp_create_conn *cp;
839 struct hci_conn *conn;
840
841 BT_DBG("%s status 0x%x", hdev->name, status);
842
843 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
844 if (!cp)
845 return;
846
847 hci_dev_lock(hdev);
848
849 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
850
851 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
852
853 if (status) {
854 if (conn && conn->state == BT_CONNECT) {
855 if (status != 0x0c || conn->attempt > 2) {
856 conn->state = BT_CLOSED;
857 hci_proto_connect_cfm(conn, status);
858 hci_conn_del(conn);
859 } else
860 conn->state = BT_CONNECT2;
861 }
862 } else {
863 if (!conn) {
864 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
865 if (conn) {
866 conn->out = 1;
867 conn->link_mode |= HCI_LM_MASTER;
868 } else
869 BT_ERR("No memory for new connection");
870 }
871 }
872
873 hci_dev_unlock(hdev);
874 }
875
876 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
877 {
878 struct hci_cp_add_sco *cp;
879 struct hci_conn *acl, *sco;
880 __u16 handle;
881
882 BT_DBG("%s status 0x%x", hdev->name, status);
883
884 if (!status)
885 return;
886
887 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
888 if (!cp)
889 return;
890
891 handle = __le16_to_cpu(cp->handle);
892
893 BT_DBG("%s handle %d", hdev->name, handle);
894
895 hci_dev_lock(hdev);
896
897 acl = hci_conn_hash_lookup_handle(hdev, handle);
898 if (acl) {
899 sco = acl->link;
900 if (sco) {
901 sco->state = BT_CLOSED;
902
903 hci_proto_connect_cfm(sco, status);
904 hci_conn_del(sco);
905 }
906 }
907
908 hci_dev_unlock(hdev);
909 }
910
911 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
912 {
913 struct hci_cp_auth_requested *cp;
914 struct hci_conn *conn;
915
916 BT_DBG("%s status 0x%x", hdev->name, status);
917
918 if (!status)
919 return;
920
921 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
922 if (!cp)
923 return;
924
925 hci_dev_lock(hdev);
926
927 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
928 if (conn) {
929 if (conn->state == BT_CONFIG) {
930 hci_proto_connect_cfm(conn, status);
931 hci_conn_put(conn);
932 }
933 }
934
935 hci_dev_unlock(hdev);
936 }
937
938 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
939 {
940 struct hci_cp_set_conn_encrypt *cp;
941 struct hci_conn *conn;
942
943 BT_DBG("%s status 0x%x", hdev->name, status);
944
945 if (!status)
946 return;
947
948 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
949 if (!cp)
950 return;
951
952 hci_dev_lock(hdev);
953
954 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
955 if (conn) {
956 if (conn->state == BT_CONFIG) {
957 hci_proto_connect_cfm(conn, status);
958 hci_conn_put(conn);
959 }
960 }
961
962 hci_dev_unlock(hdev);
963 }
964
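/* Decide whether an outgoing connection still in BT_CONFIG must be
 * authenticated before it is reported as connected. */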
965 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
966 struct hci_conn *conn)
967 {
968 if (conn->state != BT_CONFIG || !conn->out)
969 return 0;
970
971 if (conn->pending_sec_level == BT_SECURITY_SDP)
972 return 0;
973
974 /* Only request authentication for SSP connections or non-SSP
975 * devices with sec_level HIGH */
976 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
977 conn->pending_sec_level != BT_SECURITY_HIGH)
978 return 0;
979
980 return 1;
981 }
982
983 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
984 {
985 struct hci_cp_remote_name_req *cp;
986 struct hci_conn *conn;
987
988 BT_DBG("%s status 0x%x", hdev->name, status);
989
990 /* If successful wait for the name req complete event before
991 * checking for the need to do authentication */
992 if (!status)
993 return;
994
995 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
996 if (!cp)
997 return;
998
999 hci_dev_lock(hdev);
1000
1001 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1002 if (conn && hci_outgoing_auth_needed(hdev, conn)) {
1003 struct hci_cp_auth_requested cp;
1004 cp.handle = __cpu_to_le16(conn->handle);
1005 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1006 }
1007
1008 hci_dev_unlock(hdev);
1009 }
1010
1011 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1012 {
1013 struct hci_cp_read_remote_features *cp;
1014 struct hci_conn *conn;
1015
1016 BT_DBG("%s status 0x%x", hdev->name, status);
1017
1018 if (!status)
1019 return;
1020
1021 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1022 if (!cp)
1023 return;
1024
1025 hci_dev_lock(hdev);
1026
1027 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1028 if (conn) {
1029 if (conn->state == BT_CONFIG) {
1030 hci_proto_connect_cfm(conn, status);
1031 hci_conn_put(conn);
1032 }
1033 }
1034
1035 hci_dev_unlock(hdev);
1036 }
1037
1038 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1039 {
1040 struct hci_cp_read_remote_ext_features *cp;
1041 struct hci_conn *conn;
1042
1043 BT_DBG("%s status 0x%x", hdev->name, status);
1044
1045 if (!status)
1046 return;
1047
1048 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1049 if (!cp)
1050 return;
1051
1052 hci_dev_lock(hdev);
1053
1054 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1055 if (conn) {
1056 if (conn->state == BT_CONFIG) {
1057 hci_proto_connect_cfm(conn, status);
1058 hci_conn_put(conn);
1059 }
1060 }
1061
1062 hci_dev_unlock(hdev);
1063 }
1064
1065 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1066 {
1067 struct hci_cp_setup_sync_conn *cp;
1068 struct hci_conn *acl, *sco;
1069 __u16 handle;
1070
1071 BT_DBG("%s status 0x%x", hdev->name, status);
1072
1073 if (!status)
1074 return;
1075
1076 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1077 if (!cp)
1078 return;
1079
1080 handle = __le16_to_cpu(cp->handle);
1081
1082 BT_DBG("%s handle %d", hdev->name, handle);
1083
1084 hci_dev_lock(hdev);
1085
1086 acl = hci_conn_hash_lookup_handle(hdev, handle);
1087 if (acl) {
1088 sco = acl->link;
1089 if (sco) {
1090 sco->state = BT_CLOSED;
1091
1092 hci_proto_connect_cfm(sco, status);
1093 hci_conn_del(sco);
1094 }
1095 }
1096
1097 hci_dev_unlock(hdev);
1098 }
1099
1100 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1101 {
1102 struct hci_cp_sniff_mode *cp;
1103 struct hci_conn *conn;
1104
1105 BT_DBG("%s status 0x%x", hdev->name, status);
1106
1107 if (!status)
1108 return;
1109
1110 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1111 if (!cp)
1112 return;
1113
1114 hci_dev_lock(hdev);
1115
1116 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1117 if (conn) {
1118 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1119
1120 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1121 hci_sco_setup(conn, status);
1122 }
1123
1124 hci_dev_unlock(hdev);
1125 }
1126
1127 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1128 {
1129 struct hci_cp_exit_sniff_mode *cp;
1130 struct hci_conn *conn;
1131
1132 BT_DBG("%s status 0x%x", hdev->name, status);
1133
1134 if (!status)
1135 return;
1136
1137 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1138 if (!cp)
1139 return;
1140
1141 hci_dev_lock(hdev);
1142
1143 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1144 if (conn) {
1145 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1146
1147 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1148 hci_sco_setup(conn, status);
1149 }
1150
1151 hci_dev_unlock(hdev);
1152 }
1153
1154 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1155 {
1156 struct hci_cp_le_create_conn *cp;
1157 struct hci_conn *conn;
1158
1159 BT_DBG("%s status 0x%x", hdev->name, status);
1160
1161 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1162 if (!cp)
1163 return;
1164
1165 hci_dev_lock(hdev);
1166
1167 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1168
1169 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1170 conn);
1171
1172 if (status) {
1173 if (conn && conn->state == BT_CONNECT) {
1174 conn->state = BT_CLOSED;
1175 hci_proto_connect_cfm(conn, status);
1176 hci_conn_del(conn);
1177 }
1178 } else {
1179 if (!conn) {
1180 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1181 if (conn)
1182 conn->out = 1;
1183 else
1184 BT_ERR("No memory for new connection");
1185 }
1186 }
1187
1188 hci_dev_unlock(hdev);
1189 }
1190
1191 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1192 {
1193 __u8 status = *((__u8 *) skb->data);
1194
1195 BT_DBG("%s status %d", hdev->name, status);
1196
1197 clear_bit(HCI_INQUIRY, &hdev->flags);
1198
1199 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1200
1201 hci_conn_check_pending(hdev);
1202 }
1203
1204 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1205 {
1206 struct inquiry_data data;
1207 struct inquiry_info *info = (void *) (skb->data + 1);
1208 int num_rsp = *((__u8 *) skb->data);
1209
1210 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1211
1212 if (!num_rsp)
1213 return;
1214
1215 hci_dev_lock(hdev);
1216
1217 for (; num_rsp; num_rsp--) {
1218 bacpy(&data.bdaddr, &info->bdaddr);
1219 data.pscan_rep_mode = info->pscan_rep_mode;
1220 data.pscan_period_mode = info->pscan_period_mode;
1221 data.pscan_mode = info->pscan_mode;
1222 memcpy(data.dev_class, info->dev_class, 3);
1223 data.clock_offset = info->clock_offset;
1224 data.rssi = 0x00;
1225 data.ssp_mode = 0x00;
1226 info++;
1227 hci_inquiry_cache_update(hdev, &data);
1228 }
1229
1230 hci_dev_unlock(hdev);
1231 }
1232
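/* Connection Complete event: finalise the pending connection, read the
 * remote features for ACL links and, for incoming connections on
 * controllers older than 2.0, change the packet type explicitly. */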
1233 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1234 {
1235 struct hci_ev_conn_complete *ev = (void *) skb->data;
1236 struct hci_conn *conn;
1237
1238 BT_DBG("%s", hdev->name);
1239
1240 hci_dev_lock(hdev);
1241
1242 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1243 if (!conn) {
1244 if (ev->link_type != SCO_LINK)
1245 goto unlock;
1246
1247 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1248 if (!conn)
1249 goto unlock;
1250
1251 conn->type = SCO_LINK;
1252 }
1253
1254 if (!ev->status) {
1255 conn->handle = __le16_to_cpu(ev->handle);
1256
1257 if (conn->type == ACL_LINK) {
1258 conn->state = BT_CONFIG;
1259 hci_conn_hold(conn);
1260 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1261 mgmt_connected(hdev->id, &ev->bdaddr);
1262 } else
1263 conn->state = BT_CONNECTED;
1264
1265 hci_conn_hold_device(conn);
1266 hci_conn_add_sysfs(conn);
1267
1268 if (test_bit(HCI_AUTH, &hdev->flags))
1269 conn->link_mode |= HCI_LM_AUTH;
1270
1271 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1272 conn->link_mode |= HCI_LM_ENCRYPT;
1273
1274 /* Get remote features */
1275 if (conn->type == ACL_LINK) {
1276 struct hci_cp_read_remote_features cp;
1277 cp.handle = ev->handle;
1278 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1279 sizeof(cp), &cp);
1280 }
1281
1282 /* Set packet type for incoming connection */
1283 if (!conn->out && hdev->hci_ver < 3) {
1284 struct hci_cp_change_conn_ptype cp;
1285 cp.handle = ev->handle;
1286 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1287 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
1288 sizeof(cp), &cp);
1289 }
1290 } else {
1291 conn->state = BT_CLOSED;
1292 if (conn->type == ACL_LINK)
1293 mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
1294 }
1295
1296 if (conn->type == ACL_LINK)
1297 hci_sco_setup(conn, ev->status);
1298
1299 if (ev->status) {
1300 hci_proto_connect_cfm(conn, ev->status);
1301 hci_conn_del(conn);
1302 } else if (ev->link_type != ACL_LINK)
1303 hci_proto_connect_cfm(conn, ev->status);
1304
1305 unlock:
1306 hci_dev_unlock(hdev);
1307
1308 hci_conn_check_pending(hdev);
1309 }
1310
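/* Connection Request event: accept the incoming ACL or (e)SCO connection
 * unless the peer is blacklisted or no protocol will take it, in which
 * case the request is rejected. */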
1311 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1312 {
1313 struct hci_ev_conn_request *ev = (void *) skb->data;
1314 int mask = hdev->link_mode;
1315
1316 BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1317 batostr(&ev->bdaddr), ev->link_type);
1318
1319 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1320
1321 if ((mask & HCI_LM_ACCEPT) &&
1322 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1323 /* Connection accepted */
1324 struct inquiry_entry *ie;
1325 struct hci_conn *conn;
1326
1327 hci_dev_lock(hdev);
1328
1329 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1330 if (ie)
1331 memcpy(ie->data.dev_class, ev->dev_class, 3);
1332
1333 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1334 if (!conn) {
1335 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1336 if (!conn) {
1337 BT_ERR("No memory for new connection");
1338 hci_dev_unlock(hdev);
1339 return;
1340 }
1341 }
1342
1343 memcpy(conn->dev_class, ev->dev_class, 3);
1344 conn->state = BT_CONNECT;
1345
1346 hci_dev_unlock(hdev);
1347
1348 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1349 struct hci_cp_accept_conn_req cp;
1350
1351 bacpy(&cp.bdaddr, &ev->bdaddr);
1352
1353 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1354 cp.role = 0x00; /* Become master */
1355 else
1356 cp.role = 0x01; /* Remain slave */
1357
1358 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
1359 sizeof(cp), &cp);
1360 } else {
1361 struct hci_cp_accept_sync_conn_req cp;
1362
1363 bacpy(&cp.bdaddr, &ev->bdaddr);
1364 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1365
1366 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
1367 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
1368 cp.max_latency = cpu_to_le16(0xffff);
1369 cp.content_format = cpu_to_le16(hdev->voice_setting);
1370 cp.retrans_effort = 0xff;
1371
1372 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1373 sizeof(cp), &cp);
1374 }
1375 } else {
1376 /* Connection rejected */
1377 struct hci_cp_reject_conn_req cp;
1378
1379 bacpy(&cp.bdaddr, &ev->bdaddr);
1380 cp.reason = 0x0f;
1381 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1382 }
1383 }
1384
1385 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1386 {
1387 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1388 struct hci_conn *conn;
1389
1390 BT_DBG("%s status %d", hdev->name, ev->status);
1391
1392 if (ev->status) {
1393 mgmt_disconnect_failed(hdev->id);
1394 return;
1395 }
1396
1397 hci_dev_lock(hdev);
1398
1399 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1400 if (!conn)
1401 goto unlock;
1402
1403 conn->state = BT_CLOSED;
1404
1405 if (conn->type == ACL_LINK)
1406 mgmt_disconnected(hdev->id, &conn->dst);
1407
1408 hci_proto_disconn_cfm(conn, ev->reason);
1409 hci_conn_del(conn);
1410
1411 unlock:
1412 hci_dev_unlock(hdev);
1413 }
1414
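/* Authentication Complete event: record the resulting security level and,
 * for SSP links still in BT_CONFIG, continue by enabling encryption before
 * the connection is reported up. */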
1415 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1416 {
1417 struct hci_ev_auth_complete *ev = (void *) skb->data;
1418 struct hci_conn *conn;
1419
1420 BT_DBG("%s status %d", hdev->name, ev->status);
1421
1422 hci_dev_lock(hdev);
1423
1424 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1425 if (conn) {
1426 if (!ev->status) {
1427 conn->link_mode |= HCI_LM_AUTH;
1428 conn->sec_level = conn->pending_sec_level;
1429 } else {
1430 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
1431 conn->sec_level = BT_SECURITY_LOW;
1432 }
1433
1434 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1435
1436 if (conn->state == BT_CONFIG) {
1437 if (!ev->status && hdev->ssp_mode > 0 &&
1438 conn->ssp_mode > 0) {
1439 struct hci_cp_set_conn_encrypt cp;
1440 cp.handle = ev->handle;
1441 cp.encrypt = 0x01;
1442 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
1443 sizeof(cp), &cp);
1444 } else {
1445 conn->state = BT_CONNECTED;
1446 hci_proto_connect_cfm(conn, ev->status);
1447 hci_conn_put(conn);
1448 }
1449 } else {
1450 hci_auth_cfm(conn, ev->status);
1451
1452 hci_conn_hold(conn);
1453 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1454 hci_conn_put(conn);
1455 }
1456
1457 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
1458 if (!ev->status) {
1459 struct hci_cp_set_conn_encrypt cp;
1460 cp.handle = ev->handle;
1461 cp.encrypt = 0x01;
1462 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
1463 sizeof(cp), &cp);
1464 } else {
1465 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1466 hci_encrypt_cfm(conn, ev->status, 0x00);
1467 }
1468 }
1469 }
1470
1471 hci_dev_unlock(hdev);
1472 }
1473
1474 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1475 {
1476 struct hci_ev_remote_name *ev = (void *) skb->data;
1477 struct hci_conn *conn;
1478
1479 BT_DBG("%s", hdev->name);
1480
1481 hci_conn_check_pending(hdev);
1482
1483 hci_dev_lock(hdev);
1484
1485 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1486 if (conn && hci_outgoing_auth_needed(hdev, conn)) {
1487 struct hci_cp_auth_requested cp;
1488 cp.handle = __cpu_to_le16(conn->handle);
1489 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1490 }
1491
1492 hci_dev_unlock(hdev);
1493 }
1494
1495 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1496 {
1497 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1498 struct hci_conn *conn;
1499
1500 BT_DBG("%s status %d", hdev->name, ev->status);
1501
1502 hci_dev_lock(hdev);
1503
1504 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1505 if (conn) {
1506 if (!ev->status) {
1507 if (ev->encrypt) {
1508 /* Encryption implies authentication */
1509 conn->link_mode |= HCI_LM_AUTH;
1510 conn->link_mode |= HCI_LM_ENCRYPT;
1511 } else
1512 conn->link_mode &= ~HCI_LM_ENCRYPT;
1513 }
1514
1515 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1516
1517 if (conn->state == BT_CONFIG) {
1518 if (!ev->status)
1519 conn->state = BT_CONNECTED;
1520
1521 hci_proto_connect_cfm(conn, ev->status);
1522 hci_conn_put(conn);
1523 } else
1524 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1525 }
1526
1527 hci_dev_unlock(hdev);
1528 }
1529
1530 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1531 {
1532 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1533 struct hci_conn *conn;
1534
1535 BT_DBG("%s status %d", hdev->name, ev->status);
1536
1537 hci_dev_lock(hdev);
1538
1539 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1540 if (conn) {
1541 if (!ev->status)
1542 conn->link_mode |= HCI_LM_SECURE;
1543
1544 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1545
1546 hci_key_change_cfm(conn, ev->status);
1547 }
1548
1549 hci_dev_unlock(hdev);
1550 }
1551
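/* Remote Features event: store the remote LMP features, then continue
 * connection setup by reading the extended features (SSP capable peers)
 * or requesting the remote name. */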
1552 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1553 {
1554 struct hci_ev_remote_features *ev = (void *) skb->data;
1555 struct hci_conn *conn;
1556
1557 BT_DBG("%s status %d", hdev->name, ev->status);
1558
1559 hci_dev_lock(hdev);
1560
1561 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1562 if (!conn)
1563 goto unlock;
1564
1565 if (!ev->status)
1566 memcpy(conn->features, ev->features, 8);
1567
1568 if (conn->state != BT_CONFIG)
1569 goto unlock;
1570
1571 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
1572 struct hci_cp_read_remote_ext_features cp;
1573 cp.handle = ev->handle;
1574 cp.page = 0x01;
1575 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
1576 sizeof(cp), &cp);
1577 goto unlock;
1578 }
1579
1580 if (!ev->status) {
1581 struct hci_cp_remote_name_req cp;
1582 memset(&cp, 0, sizeof(cp));
1583 bacpy(&cp.bdaddr, &conn->dst);
1584 cp.pscan_rep_mode = 0x02;
1585 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1586 }
1587
1588 if (!hci_outgoing_auth_needed(hdev, conn)) {
1589 conn->state = BT_CONNECTED;
1590 hci_proto_connect_cfm(conn, ev->status);
1591 hci_conn_put(conn);
1592 }
1593
1594 unlock:
1595 hci_dev_unlock(hdev);
1596 }
1597
1598 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
1599 {
1600 BT_DBG("%s", hdev->name);
1601 }
1602
1603 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1604 {
1605 BT_DBG("%s", hdev->name);
1606 }
1607
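/* Command Complete event: dispatch to the hci_cc_* handler for the opcode
 * and, if the controller reports free command slots, send the next queued
 * command. */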
1608 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1609 {
1610 struct hci_ev_cmd_complete *ev = (void *) skb->data;
1611 __u16 opcode;
1612
1613 skb_pull(skb, sizeof(*ev));
1614
1615 opcode = __le16_to_cpu(ev->opcode);
1616
1617 switch (opcode) {
1618 case HCI_OP_INQUIRY_CANCEL:
1619 hci_cc_inquiry_cancel(hdev, skb);
1620 break;
1621
1622 case HCI_OP_EXIT_PERIODIC_INQ:
1623 hci_cc_exit_periodic_inq(hdev, skb);
1624 break;
1625
1626 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
1627 hci_cc_remote_name_req_cancel(hdev, skb);
1628 break;
1629
1630 case HCI_OP_ROLE_DISCOVERY:
1631 hci_cc_role_discovery(hdev, skb);
1632 break;
1633
1634 case HCI_OP_READ_LINK_POLICY:
1635 hci_cc_read_link_policy(hdev, skb);
1636 break;
1637
1638 case HCI_OP_WRITE_LINK_POLICY:
1639 hci_cc_write_link_policy(hdev, skb);
1640 break;
1641
1642 case HCI_OP_READ_DEF_LINK_POLICY:
1643 hci_cc_read_def_link_policy(hdev, skb);
1644 break;
1645
1646 case HCI_OP_WRITE_DEF_LINK_POLICY:
1647 hci_cc_write_def_link_policy(hdev, skb);
1648 break;
1649
1650 case HCI_OP_RESET:
1651 hci_cc_reset(hdev, skb);
1652 break;
1653
1654 case HCI_OP_WRITE_LOCAL_NAME:
1655 hci_cc_write_local_name(hdev, skb);
1656 break;
1657
1658 case HCI_OP_READ_LOCAL_NAME:
1659 hci_cc_read_local_name(hdev, skb);
1660 break;
1661
1662 case HCI_OP_WRITE_AUTH_ENABLE:
1663 hci_cc_write_auth_enable(hdev, skb);
1664 break;
1665
1666 case HCI_OP_WRITE_ENCRYPT_MODE:
1667 hci_cc_write_encrypt_mode(hdev, skb);
1668 break;
1669
1670 case HCI_OP_WRITE_SCAN_ENABLE:
1671 hci_cc_write_scan_enable(hdev, skb);
1672 break;
1673
1674 case HCI_OP_READ_CLASS_OF_DEV:
1675 hci_cc_read_class_of_dev(hdev, skb);
1676 break;
1677
1678 case HCI_OP_WRITE_CLASS_OF_DEV:
1679 hci_cc_write_class_of_dev(hdev, skb);
1680 break;
1681
1682 case HCI_OP_READ_VOICE_SETTING:
1683 hci_cc_read_voice_setting(hdev, skb);
1684 break;
1685
1686 case HCI_OP_WRITE_VOICE_SETTING:
1687 hci_cc_write_voice_setting(hdev, skb);
1688 break;
1689
1690 case HCI_OP_HOST_BUFFER_SIZE:
1691 hci_cc_host_buffer_size(hdev, skb);
1692 break;
1693
1694 case HCI_OP_READ_SSP_MODE:
1695 hci_cc_read_ssp_mode(hdev, skb);
1696 break;
1697
1698 case HCI_OP_WRITE_SSP_MODE:
1699 hci_cc_write_ssp_mode(hdev, skb);
1700 break;
1701
1702 case HCI_OP_READ_LOCAL_VERSION:
1703 hci_cc_read_local_version(hdev, skb);
1704 break;
1705
1706 case HCI_OP_READ_LOCAL_COMMANDS:
1707 hci_cc_read_local_commands(hdev, skb);
1708 break;
1709
1710 case HCI_OP_READ_LOCAL_FEATURES:
1711 hci_cc_read_local_features(hdev, skb);
1712 break;
1713
1714 case HCI_OP_READ_BUFFER_SIZE:
1715 hci_cc_read_buffer_size(hdev, skb);
1716 break;
1717
1718 case HCI_OP_READ_BD_ADDR:
1719 hci_cc_read_bd_addr(hdev, skb);
1720 break;
1721
1722 case HCI_OP_WRITE_CA_TIMEOUT:
1723 hci_cc_write_ca_timeout(hdev, skb);
1724 break;
1725
1726 case HCI_OP_DELETE_STORED_LINK_KEY:
1727 hci_cc_delete_stored_link_key(hdev, skb);
1728 break;
1729
1730 case HCI_OP_SET_EVENT_MASK:
1731 hci_cc_set_event_mask(hdev, skb);
1732 break;
1733
1734 case HCI_OP_WRITE_INQUIRY_MODE:
1735 hci_cc_write_inquiry_mode(hdev, skb);
1736 break;
1737
1738 case HCI_OP_READ_INQ_RSP_TX_POWER:
1739 hci_cc_read_inq_rsp_tx_power(hdev, skb);
1740 break;
1741
1742 case HCI_OP_SET_EVENT_FLT:
1743 hci_cc_set_event_flt(hdev, skb);
1744 break;
1745
1746 case HCI_OP_PIN_CODE_REPLY:
1747 hci_cc_pin_code_reply(hdev, skb);
1748 break;
1749
1750 case HCI_OP_PIN_CODE_NEG_REPLY:
1751 hci_cc_pin_code_neg_reply(hdev, skb);
1752 break;
1753
1754 case HCI_OP_LE_READ_BUFFER_SIZE:
1755 hci_cc_le_read_buffer_size(hdev, skb);
1756 break;
1757
1758 case HCI_OP_USER_CONFIRM_REPLY:
1759 hci_cc_user_confirm_reply(hdev, skb);
1760 break;
1761
1762 case HCI_OP_USER_CONFIRM_NEG_REPLY:
1763 hci_cc_user_confirm_neg_reply(hdev, skb);
1764 break;
1765
1766 default:
1767 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1768 break;
1769 }
1770
1771 if (ev->opcode != HCI_OP_NOP)
1772 del_timer(&hdev->cmd_timer);
1773
1774 if (ev->ncmd) {
1775 atomic_set(&hdev->cmd_cnt, 1);
1776 if (!skb_queue_empty(&hdev->cmd_q))
1777 tasklet_schedule(&hdev->cmd_task);
1778 }
1779 }
1780
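/* Command Status event: dispatch to the hci_cs_* handler for the opcode,
 * mirroring the Command Complete handling above. */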
1781 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
1782 {
1783 struct hci_ev_cmd_status *ev = (void *) skb->data;
1784 __u16 opcode;
1785
1786 skb_pull(skb, sizeof(*ev));
1787
1788 opcode = __le16_to_cpu(ev->opcode);
1789
1790 switch (opcode) {
1791 case HCI_OP_INQUIRY:
1792 hci_cs_inquiry(hdev, ev->status);
1793 break;
1794
1795 case HCI_OP_CREATE_CONN:
1796 hci_cs_create_conn(hdev, ev->status);
1797 break;
1798
1799 case HCI_OP_ADD_SCO:
1800 hci_cs_add_sco(hdev, ev->status);
1801 break;
1802
1803 case HCI_OP_AUTH_REQUESTED:
1804 hci_cs_auth_requested(hdev, ev->status);
1805 break;
1806
1807 case HCI_OP_SET_CONN_ENCRYPT:
1808 hci_cs_set_conn_encrypt(hdev, ev->status);
1809 break;
1810
1811 case HCI_OP_REMOTE_NAME_REQ:
1812 hci_cs_remote_name_req(hdev, ev->status);
1813 break;
1814
1815 case HCI_OP_READ_REMOTE_FEATURES:
1816 hci_cs_read_remote_features(hdev, ev->status);
1817 break;
1818
1819 case HCI_OP_READ_REMOTE_EXT_FEATURES:
1820 hci_cs_read_remote_ext_features(hdev, ev->status);
1821 break;
1822
1823 case HCI_OP_SETUP_SYNC_CONN:
1824 hci_cs_setup_sync_conn(hdev, ev->status);
1825 break;
1826
1827 case HCI_OP_SNIFF_MODE:
1828 hci_cs_sniff_mode(hdev, ev->status);
1829 break;
1830
1831 case HCI_OP_EXIT_SNIFF_MODE:
1832 hci_cs_exit_sniff_mode(hdev, ev->status);
1833 break;
1834
1835 case HCI_OP_DISCONNECT:
1836 if (ev->status != 0)
1837 mgmt_disconnect_failed(hdev->id);
1838 break;
1839
1840 case HCI_OP_LE_CREATE_CONN:
1841 hci_cs_le_create_conn(hdev, ev->status);
1842 break;
1843
1844 default:
1845 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1846 break;
1847 }
1848
1849 if (ev->opcode != HCI_OP_NOP)
1850 del_timer(&hdev->cmd_timer);
1851
1852 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
1853 atomic_set(&hdev->cmd_cnt, 1);
1854 if (!skb_queue_empty(&hdev->cmd_q))
1855 tasklet_schedule(&hdev->cmd_task);
1856 }
1857 }
1858
1859 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1860 {
1861 struct hci_ev_role_change *ev = (void *) skb->data;
1862 struct hci_conn *conn;
1863
1864 BT_DBG("%s status %d", hdev->name, ev->status);
1865
1866 hci_dev_lock(hdev);
1867
1868 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1869 if (conn) {
1870 if (!ev->status) {
1871 if (ev->role)
1872 conn->link_mode &= ~HCI_LM_MASTER;
1873 else
1874 conn->link_mode |= HCI_LM_MASTER;
1875 }
1876
1877 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
1878
1879 hci_role_switch_cfm(conn, ev->status, ev->role);
1880 }
1881
1882 hci_dev_unlock(hdev);
1883 }
1884
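/* Number of Completed Packets event: return the reported credits to the
 * per-type ACL/SCO/LE counters and kick the TX tasklet. */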
1885 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
1886 {
1887 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
1888 __le16 *ptr;
1889 int i;
1890
1891 skb_pull(skb, sizeof(*ev));
1892
1893 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
1894
1895 if (skb->len < ev->num_hndl * 4) {
1896 BT_DBG("%s bad parameters", hdev->name);
1897 return;
1898 }
1899
1900 tasklet_disable(&hdev->tx_task);
1901
1902 for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
1903 struct hci_conn *conn;
1904 __u16 handle, count;
1905
1906 handle = get_unaligned_le16(ptr++);
1907 count = get_unaligned_le16(ptr++);
1908
1909 conn = hci_conn_hash_lookup_handle(hdev, handle);
1910 if (conn) {
1911 conn->sent -= count;
1912
1913 if (conn->type == ACL_LINK) {
1914 hdev->acl_cnt += count;
1915 if (hdev->acl_cnt > hdev->acl_pkts)
1916 hdev->acl_cnt = hdev->acl_pkts;
1917 } else if (conn->type == LE_LINK) {
1918 if (hdev->le_pkts) {
1919 hdev->le_cnt += count;
1920 if (hdev->le_cnt > hdev->le_pkts)
1921 hdev->le_cnt = hdev->le_pkts;
1922 } else {
1923 hdev->acl_cnt += count;
1924 if (hdev->acl_cnt > hdev->acl_pkts)
1925 hdev->acl_cnt = hdev->acl_pkts;
1926 }
1927 } else {
1928 hdev->sco_cnt += count;
1929 if (hdev->sco_cnt > hdev->sco_pkts)
1930 hdev->sco_cnt = hdev->sco_pkts;
1931 }
1932 }
1933 }
1934
1935 tasklet_schedule(&hdev->tx_task);
1936
1937 tasklet_enable(&hdev->tx_task);
1938 }
1939
1940 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1941 {
1942 struct hci_ev_mode_change *ev = (void *) skb->data;
1943 struct hci_conn *conn;
1944
1945 BT_DBG("%s status %d", hdev->name, ev->status);
1946
1947 hci_dev_lock(hdev);
1948
1949 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1950 if (conn) {
1951 conn->mode = ev->mode;
1952 conn->interval = __le16_to_cpu(ev->interval);
1953
1954 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
1955 if (conn->mode == HCI_CM_ACTIVE)
1956 conn->power_save = 1;
1957 else
1958 conn->power_save = 0;
1959 }
1960
1961 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1962 hci_sco_setup(conn, ev->status);
1963 }
1964
1965 hci_dev_unlock(hdev);
1966 }
1967
1968 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1969 {
1970 struct hci_ev_pin_code_req *ev = (void *) skb->data;
1971 struct hci_conn *conn;
1972
1973 BT_DBG("%s", hdev->name);
1974
1975 hci_dev_lock(hdev);
1976
1977 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1978 if (conn && conn->state == BT_CONNECTED) {
1979 hci_conn_hold(conn);
1980 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1981 hci_conn_put(conn);
1982 }
1983
1984 if (!test_bit(HCI_PAIRABLE, &hdev->flags))
1985 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1986 sizeof(ev->bdaddr), &ev->bdaddr);
1987
1988 if (test_bit(HCI_MGMT, &hdev->flags))
1989 mgmt_pin_code_request(hdev->id, &ev->bdaddr);
1990
1991 hci_dev_unlock(hdev);
1992 }
1993
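/* Link Key Request: the controller asks whether a link key for the peer
 * is stored on the host.  Debug keys are only handed out when
 * HCI_DEBUG_KEYS is set, unauthenticated keys are refused when the
 * connection requires MITM protection, and anything else results in a
 * negative reply. */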
1994 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1995 {
1996 struct hci_ev_link_key_req *ev = (void *) skb->data;
1997 struct hci_cp_link_key_reply cp;
1998 struct hci_conn *conn;
1999 struct link_key *key;
2000
2001 BT_DBG("%s", hdev->name);
2002
2003 if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
2004 return;
2005
2006 hci_dev_lock(hdev);
2007
2008 key = hci_find_link_key(hdev, &ev->bdaddr);
2009 if (!key) {
2010 BT_DBG("%s link key not found for %s", hdev->name,
2011 batostr(&ev->bdaddr));
2012 goto not_found;
2013 }
2014
2015 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2016 batostr(&ev->bdaddr));
2017
2018 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && key->type == 0x03) {
2019 BT_DBG("%s ignoring debug key", hdev->name);
2020 goto not_found;
2021 }
2022
2023 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2024
2025 if (key->type == 0x04 && conn && conn->auth_type != 0xff &&
2026 (conn->auth_type & 0x01)) {
2027 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2028 goto not_found;
2029 }
2030
2031 bacpy(&cp.bdaddr, &ev->bdaddr);
2032 memcpy(cp.link_key, key->val, 16);
2033
2034 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2035
2036 hci_dev_unlock(hdev);
2037
2038 return;
2039
2040 not_found:
2041 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2042 hci_dev_unlock(hdev);
2043 }
2044
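/* Link Key Notification: pairing produced a new link key.  Store it
 * (together with the PIN length that was used) if the host manages link
 * keys, and drop the disconnect timeout back to its normal value. */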
2045 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2046 {
2047 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2048 struct hci_conn *conn;
2049 u8 pin_len = 0;
2050
2051 BT_DBG("%s", hdev->name);
2052
2053 hci_dev_lock(hdev);
2054
2055 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2056 if (conn) {
2057 hci_conn_hold(conn);
2058 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2059 pin_len = conn->pin_length;
2060 hci_conn_put(conn);
2061 }
2062
2063 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
2064 hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key,
2065 ev->key_type, pin_len);
2066
2067 hci_dev_unlock(hdev);
2068 }
2069
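/* Clock Offset Complete: cache the peer's clock offset in the inquiry
 * cache so that future connections to it can be paged faster. */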
2070 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2071 {
2072 struct hci_ev_clock_offset *ev = (void *) skb->data;
2073 struct hci_conn *conn;
2074
2075 BT_DBG("%s status %d", hdev->name, ev->status);
2076
2077 hci_dev_lock(hdev);
2078
2079 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2080 if (conn && !ev->status) {
2081 struct inquiry_entry *ie;
2082
2083 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2084 if (ie) {
2085 ie->data.clock_offset = ev->clock_offset;
2086 ie->timestamp = jiffies;
2087 }
2088 }
2089
2090 hci_dev_unlock(hdev);
2091 }
2092
2093 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2094 {
2095 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2096 struct hci_conn *conn;
2097
2098 BT_DBG("%s status %d", hdev->name, ev->status);
2099
2100 hci_dev_lock(hdev);
2101
2102 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2103 if (conn && !ev->status)
2104 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2105
2106 hci_dev_unlock(hdev);
2107 }
2108
2109 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2110 {
2111 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2112 struct inquiry_entry *ie;
2113
2114 BT_DBG("%s", hdev->name);
2115
2116 hci_dev_lock(hdev);
2117
2118 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2119 if (ie) {
2120 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2121 ie->timestamp = jiffies;
2122 }
2123
2124 hci_dev_unlock(hdev);
2125 }
2126
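/* Inquiry Result with RSSI exists in two on-air formats; they are told
 * apart by the per-response size, since only the older variant still
 * carries a pscan_mode field. */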
2127 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2128 {
2129 struct inquiry_data data;
2130 int num_rsp = *((__u8 *) skb->data);
2131
2132 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2133
2134 if (!num_rsp)
2135 return;
2136
2137 hci_dev_lock(hdev);
2138
2139 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2140 struct inquiry_info_with_rssi_and_pscan_mode *info;
2141 info = (void *) (skb->data + 1);
2142
2143 for (; num_rsp; num_rsp--) {
2144 bacpy(&data.bdaddr, &info->bdaddr);
2145 data.pscan_rep_mode = info->pscan_rep_mode;
2146 data.pscan_period_mode = info->pscan_period_mode;
2147 data.pscan_mode = info->pscan_mode;
2148 memcpy(data.dev_class, info->dev_class, 3);
2149 data.clock_offset = info->clock_offset;
2150 data.rssi = info->rssi;
2151 data.ssp_mode = 0x00;
2152 info++;
2153 hci_inquiry_cache_update(hdev, &data);
2154 }
2155 } else {
2156 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2157
2158 for (; num_rsp; num_rsp--) {
2159 bacpy(&data.bdaddr, &info->bdaddr);
2160 data.pscan_rep_mode = info->pscan_rep_mode;
2161 data.pscan_period_mode = info->pscan_period_mode;
2162 data.pscan_mode = 0x00;
2163 memcpy(data.dev_class, info->dev_class, 3);
2164 data.clock_offset = info->clock_offset;
2165 data.rssi = info->rssi;
2166 data.ssp_mode = 0x00;
2167 info++;
2168 hci_inquiry_cache_update(hdev, &data);
2169 }
2170 }
2171
2172 hci_dev_unlock(hdev);
2173 }
2174
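/* Read Remote Extended Features Complete: bit 0 of features page 1
 * tells whether the remote host supports Secure Simple Pairing.  While
 * the connection is still in BT_CONFIG the remote name is requested
 * and, if no outgoing authentication is needed, setup is finished. */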
2175 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2176 {
2177 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2178 struct hci_conn *conn;
2179
2180 BT_DBG("%s", hdev->name);
2181
2182 hci_dev_lock(hdev);
2183
2184 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2185 if (!conn)
2186 goto unlock;
2187
2188 if (!ev->status && ev->page == 0x01) {
2189 struct inquiry_entry *ie;
2190
2191 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2192 if (ie)
2193 ie->data.ssp_mode = (ev->features[0] & 0x01);
2194
2195 conn->ssp_mode = (ev->features[0] & 0x01);
2196 }
2197
2198 if (conn->state != BT_CONFIG)
2199 goto unlock;
2200
2201 if (!ev->status) {
2202 struct hci_cp_remote_name_req cp;
2203 memset(&cp, 0, sizeof(cp));
2204 bacpy(&cp.bdaddr, &conn->dst);
2205 cp.pscan_rep_mode = 0x02;
2206 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2207 }
2208
2209 if (!hci_outgoing_auth_needed(hdev, conn)) {
2210 conn->state = BT_CONNECTED;
2211 hci_proto_connect_cfm(conn, ev->status);
2212 hci_conn_put(conn);
2213 }
2214
2215 unlock:
2216 hci_dev_unlock(hdev);
2217 }
2218
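/* Synchronous Connection Complete finishes SCO/eSCO setup.  A few
 * specific rejection codes trigger a single retry with a restricted
 * packet-type mask before the connection is finally failed. */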
2219 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2220 {
2221 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2222 struct hci_conn *conn;
2223
2224 BT_DBG("%s status %d", hdev->name, ev->status);
2225
2226 hci_dev_lock(hdev);
2227
2228 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2229 if (!conn) {
2230 if (ev->link_type == ESCO_LINK)
2231 goto unlock;
2232
2233 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2234 if (!conn)
2235 goto unlock;
2236
2237 conn->type = SCO_LINK;
2238 }
2239
2240 switch (ev->status) {
2241 case 0x00:
2242 conn->handle = __le16_to_cpu(ev->handle);
2243 conn->state = BT_CONNECTED;
2244
2245 hci_conn_hold_device(conn);
2246 hci_conn_add_sysfs(conn);
2247 break;
2248
2249 case 0x11: /* Unsupported Feature or Parameter Value */
2250 case 0x1c: /* SCO interval rejected */
2251 case 0x1a: /* Unsupported Remote Feature */
2252 case 0x1f: /* Unspecified error */
2253 if (conn->out && conn->attempt < 2) {
2254 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2255 (hdev->esco_type & EDR_ESCO_MASK);
2256 hci_setup_sync(conn, conn->link->handle);
2257 goto unlock;
2258 }
2259 /* fall through */
2260
2261 default:
2262 conn->state = BT_CLOSED;
2263 break;
2264 }
2265
2266 hci_proto_connect_cfm(conn, ev->status);
2267 if (ev->status)
2268 hci_conn_del(conn);
2269
2270 unlock:
2271 hci_dev_unlock(hdev);
2272 }
2273
2274 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2275 {
2276 BT_DBG("%s", hdev->name);
2277 }
2278
2279 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2280 {
2281 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2282
2283 BT_DBG("%s status %d", hdev->name, ev->status);
2284 }
2285
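/* Extended Inquiry Result entries only come from 2.1+ devices, so each
 * cached entry is marked as supporting Secure Simple Pairing. */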
2286 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2287 {
2288 struct inquiry_data data;
2289 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2290 int num_rsp = *((__u8 *) skb->data);
2291
2292 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2293
2294 if (!num_rsp)
2295 return;
2296
2297 hci_dev_lock(hdev);
2298
2299 for (; num_rsp; num_rsp--) {
2300 bacpy(&data.bdaddr, &info->bdaddr);
2301 data.pscan_rep_mode = info->pscan_rep_mode;
2302 data.pscan_period_mode = info->pscan_period_mode;
2303 data.pscan_mode = 0x00;
2304 memcpy(data.dev_class, info->dev_class, 3);
2305 data.clock_offset = info->clock_offset;
2306 data.rssi = info->rssi;
2307 data.ssp_mode = 0x01;
2308 info++;
2309 hci_inquiry_cache_update(hdev, &data);
2310 }
2311
2312 hci_dev_unlock(hdev);
2313 }
2314
2315 static inline u8 hci_get_auth_req(struct hci_conn *conn)
2316 {
2317 /* If remote requests dedicated bonding follow that lead */
2318 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2319 /* If both remote and local IO capabilities allow MITM
2320 * protection then require it, otherwise don't */
2321 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2322 return 0x02;
2323 else
2324 return 0x03;
2325 }
2326
2327 /* If remote requests no-bonding follow that lead */
2328 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2329 return 0x00;
2330
2331 return conn->auth_type;
2332 }
2333
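/* IO Capability Request starts Secure Simple Pairing.  When the
 * management interface drives the adapter, our IO capability and
 * derived authentication requirements are returned if pairing is
 * allowed (adapter pairable, or the peer only asks for no-bonding);
 * otherwise the request is rejected as "pairing not allowed". */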
2334 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2335 {
2336 struct hci_ev_io_capa_request *ev = (void *) skb->data;
2337 struct hci_conn *conn;
2338
2339 BT_DBG("%s", hdev->name);
2340
2341 hci_dev_lock(hdev);
2342
2343 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2344 if (!conn)
2345 goto unlock;
2346
2347 hci_conn_hold(conn);
2348
2349 if (!test_bit(HCI_MGMT, &hdev->flags))
2350 goto unlock;
2351
2352 if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
2353 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2354 struct hci_cp_io_capability_reply cp;
2355
2356 bacpy(&cp.bdaddr, &ev->bdaddr);
2357 cp.capability = conn->io_capability;
2358 cp.oob_data = 0;
2359 cp.authentication = hci_get_auth_req(conn);
2360
2361 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
2362 sizeof(cp), &cp);
2363 } else {
2364 struct hci_cp_io_capability_neg_reply cp;
2365
2366 bacpy(&cp.bdaddr, &ev->bdaddr);
2367 cp.reason = 0x16; /* Pairing not allowed */
2368
2369 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2370 sizeof(cp), &cp);
2371 }
2372
2373 unlock:
2374 hci_dev_unlock(hdev);
2375 }
2376
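/* IO Capability Response: record the peer's IO capability, OOB data
 * flag and authentication requirements for later pairing decisions. */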
2377 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2378 {
2379 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2380 struct hci_conn *conn;
2381
2382 BT_DBG("%s", hdev->name);
2383
2384 hci_dev_lock(hdev);
2385
2386 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2387 if (!conn)
2388 goto unlock;
2389
2390 conn->remote_cap = ev->capability;
2391 conn->remote_oob = ev->oob_data;
2392 conn->remote_auth = ev->authentication;
2393
2394 unlock:
2395 hci_dev_unlock(hdev);
2396 }
2397
2398 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2399 struct sk_buff *skb)
2400 {
2401 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
2402
2403 BT_DBG("%s", hdev->name);
2404
2405 hci_dev_lock(hdev);
2406
2407 if (test_bit(HCI_MGMT, &hdev->flags))
2408 mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey);
2409
2410 hci_dev_unlock(hdev);
2411 }
2412
2413 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2414 {
2415 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
2416 struct hci_conn *conn;
2417
2418 BT_DBG("%s", hdev->name);
2419
2420 hci_dev_lock(hdev);
2421
2422 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2423 if (!conn)
2424 goto unlock;
2425
2426 /* To avoid sending duplicate auth_failed events to user space we
2427 * check the HCI_CONN_AUTH_PEND flag, which is set if we initiated
2428 * the authentication. As initiator a traditional auth_complete
2429 * event is always produced, and it is already mapped to the
2430 * mgmt_auth_failed event */
2431 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
2432 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
2433
2434 hci_conn_put(conn);
2435
2436 unlock:
2437 hci_dev_unlock(hdev);
2438 }
2439
2440 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2441 {
2442 struct hci_ev_remote_host_features *ev = (void *) skb->data;
2443 struct inquiry_entry *ie;
2444
2445 BT_DBG("%s", hdev->name);
2446
2447 hci_dev_lock(hdev);
2448
2449 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2450 if (ie)
2451 ie->data.ssp_mode = (ev->features[0] & 0x01);
2452
2453 hci_dev_unlock(hdev);
2454 }
2455
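/* LE Connection Complete: reuse the pending connection object if one
 * exists (outgoing connect), otherwise create a new one, then either
 * finish setup or tear the connection down on error. */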
2456 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2457 {
2458 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
2459 struct hci_conn *conn;
2460
2461 BT_DBG("%s status %d", hdev->name, ev->status);
2462
2463 hci_dev_lock(hdev);
2464
2465 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
2466 if (!conn) {
2467 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
2468 if (!conn) {
2469 BT_ERR("No memory for new connection");
2470 hci_dev_unlock(hdev);
2471 return;
2472 }
2473 }
2474
2475 if (ev->status) {
2476 hci_proto_connect_cfm(conn, ev->status);
2477 conn->state = BT_CLOSED;
2478 hci_conn_del(conn);
2479 goto unlock;
2480 }
2481
2482 conn->handle = __le16_to_cpu(ev->handle);
2483 conn->state = BT_CONNECTED;
2484
2485 hci_conn_hold_device(conn);
2486 hci_conn_add_sysfs(conn);
2487
2488 hci_proto_connect_cfm(conn, ev->status);
2489
2490 unlock:
2491 hci_dev_unlock(hdev);
2492 }
2493
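/* All LE-specific events arrive wrapped in an LE Meta event and are
 * demultiplexed here by subevent code. */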
2494 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
2495 {
2496 struct hci_ev_le_meta *le_ev = (void *) skb->data;
2497
2498 skb_pull(skb, sizeof(*le_ev));
2499
2500 switch (le_ev->subevent) {
2501 case HCI_EV_LE_CONN_COMPLETE:
2502 hci_le_conn_complete_evt(hdev, skb);
2503 break;
2504
2505 default:
2506 break;
2507 }
2508 }
2509
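/* Entry point for incoming HCI events: strip the event header, dispatch
 * on the event code and release the skb. */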
2510 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
2511 {
2512 struct hci_event_hdr *hdr = (void *) skb->data;
2513 __u8 event = hdr->evt;
2514
2515 skb_pull(skb, HCI_EVENT_HDR_SIZE);
2516
2517 switch (event) {
2518 case HCI_EV_INQUIRY_COMPLETE:
2519 hci_inquiry_complete_evt(hdev, skb);
2520 break;
2521
2522 case HCI_EV_INQUIRY_RESULT:
2523 hci_inquiry_result_evt(hdev, skb);
2524 break;
2525
2526 case HCI_EV_CONN_COMPLETE:
2527 hci_conn_complete_evt(hdev, skb);
2528 break;
2529
2530 case HCI_EV_CONN_REQUEST:
2531 hci_conn_request_evt(hdev, skb);
2532 break;
2533
2534 case HCI_EV_DISCONN_COMPLETE:
2535 hci_disconn_complete_evt(hdev, skb);
2536 break;
2537
2538 case HCI_EV_AUTH_COMPLETE:
2539 hci_auth_complete_evt(hdev, skb);
2540 break;
2541
2542 case HCI_EV_REMOTE_NAME:
2543 hci_remote_name_evt(hdev, skb);
2544 break;
2545
2546 case HCI_EV_ENCRYPT_CHANGE:
2547 hci_encrypt_change_evt(hdev, skb);
2548 break;
2549
2550 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
2551 hci_change_link_key_complete_evt(hdev, skb);
2552 break;
2553
2554 case HCI_EV_REMOTE_FEATURES:
2555 hci_remote_features_evt(hdev, skb);
2556 break;
2557
2558 case HCI_EV_REMOTE_VERSION:
2559 hci_remote_version_evt(hdev, skb);
2560 break;
2561
2562 case HCI_EV_QOS_SETUP_COMPLETE:
2563 hci_qos_setup_complete_evt(hdev, skb);
2564 break;
2565
2566 case HCI_EV_CMD_COMPLETE:
2567 hci_cmd_complete_evt(hdev, skb);
2568 break;
2569
2570 case HCI_EV_CMD_STATUS:
2571 hci_cmd_status_evt(hdev, skb);
2572 break;
2573
2574 case HCI_EV_ROLE_CHANGE:
2575 hci_role_change_evt(hdev, skb);
2576 break;
2577
2578 case HCI_EV_NUM_COMP_PKTS:
2579 hci_num_comp_pkts_evt(hdev, skb);
2580 break;
2581
2582 case HCI_EV_MODE_CHANGE:
2583 hci_mode_change_evt(hdev, skb);
2584 break;
2585
2586 case HCI_EV_PIN_CODE_REQ:
2587 hci_pin_code_request_evt(hdev, skb);
2588 break;
2589
2590 case HCI_EV_LINK_KEY_REQ:
2591 hci_link_key_request_evt(hdev, skb);
2592 break;
2593
2594 case HCI_EV_LINK_KEY_NOTIFY:
2595 hci_link_key_notify_evt(hdev, skb);
2596 break;
2597
2598 case HCI_EV_CLOCK_OFFSET:
2599 hci_clock_offset_evt(hdev, skb);
2600 break;
2601
2602 case HCI_EV_PKT_TYPE_CHANGE:
2603 hci_pkt_type_change_evt(hdev, skb);
2604 break;
2605
2606 case HCI_EV_PSCAN_REP_MODE:
2607 hci_pscan_rep_mode_evt(hdev, skb);
2608 break;
2609
2610 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
2611 hci_inquiry_result_with_rssi_evt(hdev, skb);
2612 break;
2613
2614 case HCI_EV_REMOTE_EXT_FEATURES:
2615 hci_remote_ext_features_evt(hdev, skb);
2616 break;
2617
2618 case HCI_EV_SYNC_CONN_COMPLETE:
2619 hci_sync_conn_complete_evt(hdev, skb);
2620 break;
2621
2622 case HCI_EV_SYNC_CONN_CHANGED:
2623 hci_sync_conn_changed_evt(hdev, skb);
2624 break;
2625
2626 case HCI_EV_SNIFF_SUBRATE:
2627 hci_sniff_subrate_evt(hdev, skb);
2628 break;
2629
2630 case HCI_EV_EXTENDED_INQUIRY_RESULT:
2631 hci_extended_inquiry_result_evt(hdev, skb);
2632 break;
2633
2634 case HCI_EV_IO_CAPA_REQUEST:
2635 hci_io_capa_request_evt(hdev, skb);
2636 break;
2637
2638 case HCI_EV_IO_CAPA_REPLY:
2639 hci_io_capa_reply_evt(hdev, skb);
2640 break;
2641
2642 case HCI_EV_USER_CONFIRM_REQUEST:
2643 hci_user_confirm_request_evt(hdev, skb);
2644 break;
2645
2646 case HCI_EV_SIMPLE_PAIR_COMPLETE:
2647 hci_simple_pair_complete_evt(hdev, skb);
2648 break;
2649
2650 case HCI_EV_REMOTE_HOST_FEATURES:
2651 hci_remote_host_features_evt(hdev, skb);
2652 break;
2653
2654 case HCI_EV_LE_META:
2655 hci_le_meta_evt(hdev, skb);
2656 break;
2657
2658 default:
2659 BT_DBG("%s event 0x%x", hdev->name, event);
2660 break;
2661 }
2662
2663 kfree_skb(skb);
2664 hdev->stat.evt_rx++;
2665 }
2666
2667 /* Generate internal stack event */
2668 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
2669 {
2670 struct hci_event_hdr *hdr;
2671 struct hci_ev_stack_internal *ev;
2672 struct sk_buff *skb;
2673
2674 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
2675 if (!skb)
2676 return;
2677
2678 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
2679 hdr->evt = HCI_EV_STACK_INTERNAL;
2680 hdr->plen = sizeof(*ev) + dlen;
2681
2682 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
2683 ev->type = type;
2684 memcpy(ev->data, data, dlen);
2685
2686 bt_cb(skb)->incoming = 1;
2687 __net_timestamp(skb);
2688
2689 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
2690 skb->dev = (void *) hdev;
2691 hci_send_to_sock(hdev, skb, NULL);
2692 kfree_skb(skb);
2693 }
2694