1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI connection handling. */
26
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/iso.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "a2mp.h"
39 #include "eir.h"
40
/* One (e)SCO negotiation attempt: allowed packet types, maximum latency
 * and retransmission effort, per the Bluetooth spec parameter sets
 * (see the S/T/D tables below).
 */
struct sco_param {
	u16 pkt_type;		/* allowed (e)SCO packet-type bits */
	u16 max_latency;	/* maximum latency */
	u8  retrans_effort;	/* retransmission effort */
};
46
/* Pairs a connection with the handle to pass to the synchronous-connection
 * setup command; allocated by hci_setup_sync() and consumed (freed) by
 * hci_enhanced_setup_sync().
 */
struct conn_handle_t {
	struct hci_conn *conn;	/* connection being set up */
	__u16 handle;		/* handle for the setup command */
};
51
/* eSCO CVSD parameter sets, tried in order (indexed by conn->attempt - 1) */
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
};
59
/* Legacy SCO CVSD parameter sets (controller not eSCO capable) */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
};
64
/* eSCO mSBC (transparent air mode) parameter sets, tried in order */
static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
};
69
70 /* This function requires the caller holds hdev->lock */
hci_connect_le_scan_cleanup(struct hci_conn * conn)71 static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
72 {
73 struct hci_conn_params *params;
74 struct hci_dev *hdev = conn->hdev;
75 struct smp_irk *irk;
76 bdaddr_t *bdaddr;
77 u8 bdaddr_type;
78
79 bdaddr = &conn->dst;
80 bdaddr_type = conn->dst_type;
81
82 /* Check if we need to convert to identity address */
83 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
84 if (irk) {
85 bdaddr = &irk->bdaddr;
86 bdaddr_type = irk->addr_type;
87 }
88
89 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
90 bdaddr_type);
91 if (!params || !params->explicit_connect)
92 return;
93
94 /* The connection attempt was doing scan for new RPA, and is
95 * in scan phase. If params are not associated with any other
96 * autoconnect action, remove them completely. If they are, just unmark
97 * them as waiting for connection, by clearing explicit_connect field.
98 */
99 params->explicit_connect = false;
100
101 list_del_init(¶ms->action);
102
103 switch (params->auto_connect) {
104 case HCI_AUTO_CONN_EXPLICIT:
105 hci_conn_params_del(hdev, bdaddr, bdaddr_type);
106 /* return instead of break to avoid duplicate scan update */
107 return;
108 case HCI_AUTO_CONN_DIRECT:
109 case HCI_AUTO_CONN_ALWAYS:
110 list_add(¶ms->action, &hdev->pend_le_conns);
111 break;
112 case HCI_AUTO_CONN_REPORT:
113 list_add(¶ms->action, &hdev->pend_le_reports);
114 break;
115 default:
116 break;
117 }
118
119 hci_update_passive_scan(hdev);
120 }
121
/* Remove the connection from the hash and release its remaining state:
 * flush channels, run the type-specific cleanup hook, notify the driver,
 * remove sysfs/debugfs entries and drop one hdev and one conn reference.
 */
static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	/* Type-specific cleanup (e.g. bis_cleanup/cis_cleanup for ISO links) */
	if (conn->cleanup)
		conn->cleanup(conn);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	hci_conn_del_sysfs(conn);

	debugfs_remove_recursive(conn->debugfs);

	hci_dev_put(hdev);

	hci_conn_put(conn);
}
160
/* Deferred cleanup scheduled by hci_connect_le_scan_remove(). Verifies the
 * hci_conn is still in the connection hash before cleaning it up, then drops
 * the temporary hdev/conn references taken when the work was scheduled.
 */
static void le_scan_cleanup(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_scan_cleanup);
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *c = NULL;

	BT_DBG("%s hcon %p", hdev->name, conn);

	hci_dev_lock(hdev);

	/* Check that the hci_conn is still around */
	rcu_read_lock();
	list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
		if (c == conn)
			break;
	}
	rcu_read_unlock();

	if (c == conn) {
		hci_connect_le_scan_cleanup(conn);
		hci_conn_cleanup(conn);
	}

	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	hci_conn_put(conn);
}
189
/* Abort an LE connection attempt that is still in the scanning phase by
 * deferring the teardown to the le_scan_cleanup work item.
 */
static void hci_connect_le_scan_remove(struct hci_conn *conn)
{
	BT_DBG("%s hcon %p", conn->hdev->name, conn);

	/* We can't call hci_conn_del/hci_conn_cleanup here since that
	 * could deadlock with another hci_conn_del() call that's holding
	 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
	 * Instead, grab temporary extra references to the hci_dev and
	 * hci_conn and perform the necessary cleanup in a separate work
	 * callback.
	 */

	hci_dev_hold(conn->hdev);
	hci_conn_get(conn);

	/* Even though we hold a reference to the hdev, many other
	 * things might get cleaned up meanwhile, including the hdev's
	 * own workqueue, so we can't use that for scheduling.
	 */
	schedule_work(&conn->le_scan_cleanup);
}
211
/* Issue HCI Create Connection for an outgoing ACL link, seeding the page
 * scan parameters from the inquiry cache when a fresh entry is available.
 */
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	/* Many controllers disallow HCI Create Connection while it is doing
	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
	 * Connection. This may cause the MGMT discovering state to become false
	 * without user space's request but it is okay since the MGMT Discovery
	 * APIs do not promise that discovery should be done forever. Instead,
	 * the user space monitors the status of MGMT discovering and it may
	 * request for discovery again when this flag becomes false.
	 */
	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
		/* Put this connection to "pending" state so that it will be
		 * executed after the inquiry cancel command complete event.
		 */
		conn->state = BT_CONNECT2;
		hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		return;
	}

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	/* Use cached page-scan parameters if the inquiry entry is recent */
	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode    = ie->data.pscan_mode;
			cp.clock_offset  = ie->data.clock_offset |
					   cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
269
/* Initiate disconnection of @conn with @reason; returns the result of
 * hci_abort_conn().
 */
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	BT_DBG("hcon %p", conn);

	/* When we are central of an established connection and it enters
	 * the disconnect timeout, then go ahead and try to read the
	 * current clock offset.  Processing of the result is done
	 * within the event handling and hci_clock_offset_evt function.
	 */
	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
		struct hci_dev *hdev = conn->hdev;
		struct hci_cp_read_clock_offset clkoff_cp;

		clkoff_cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
			     &clkoff_cp);
	}

	return hci_abort_conn(conn, reason);
}
291
/* Request a legacy SCO link on top of the ACL connection @handle. */
static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->out = true;
	conn->state = BT_CONNECT;
	conn->attempt++;

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	cp.handle = cpu_to_le16(handle);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
309
find_next_esco_param(struct hci_conn * conn,const struct sco_param * esco_param,int size)310 static bool find_next_esco_param(struct hci_conn *conn,
311 const struct sco_param *esco_param, int size)
312 {
313 for (; conn->attempt <= size; conn->attempt++) {
314 if (lmp_esco_2m_capable(conn->link) ||
315 (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
316 break;
317 BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
318 conn, conn->attempt);
319 }
320
321 return conn->attempt <= size;
322 }
323
/* Configure the controller data path in both directions for an offloaded
 * codec, using driver-provided vendor configuration data.  Returns 0 on
 * success or a negative error code.
 */
static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
{
	int err;
	__u8 vnd_len, *vnd_data = NULL;
	struct hci_op_configure_data_path *cmd = NULL;

	/* Ask the driver for the codec-specific vendor data */
	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
					  &vnd_data);
	if (err < 0)
		goto error;

	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
	if (!cmd) {
		err = -ENOMEM;
		goto error;
	}

	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
	if (err < 0)
		goto error;

	cmd->vnd_len = vnd_len;
	memcpy(cmd->vnd_data, vnd_data, vnd_len);

	/* Issue the command once per direction (0x00 then 0x01) */
	cmd->direction = 0x00;
	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);

	cmd->direction = 0x01;
	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
				    sizeof(*cmd) + vnd_len, cmd,
				    HCI_CMD_TIMEOUT);
error:

	kfree(cmd);
	kfree(vnd_data);
	return err;
}
362
hci_enhanced_setup_sync(struct hci_dev * hdev,void * data)363 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
364 {
365 struct conn_handle_t *conn_handle = data;
366 struct hci_conn *conn = conn_handle->conn;
367 __u16 handle = conn_handle->handle;
368 struct hci_cp_enhanced_setup_sync_conn cp;
369 const struct sco_param *param;
370
371 kfree(conn_handle);
372
373 bt_dev_dbg(hdev, "hcon %p", conn);
374
375 /* for offload use case, codec needs to configured before opening SCO */
376 if (conn->codec.data_path)
377 configure_datapath_sync(hdev, &conn->codec);
378
379 conn->state = BT_CONNECT;
380 conn->out = true;
381
382 conn->attempt++;
383
384 memset(&cp, 0x00, sizeof(cp));
385
386 cp.handle = cpu_to_le16(handle);
387
388 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
389 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
390
391 switch (conn->codec.id) {
392 case BT_CODEC_MSBC:
393 if (!find_next_esco_param(conn, esco_param_msbc,
394 ARRAY_SIZE(esco_param_msbc)))
395 return -EINVAL;
396
397 param = &esco_param_msbc[conn->attempt - 1];
398 cp.tx_coding_format.id = 0x05;
399 cp.rx_coding_format.id = 0x05;
400 cp.tx_codec_frame_size = __cpu_to_le16(60);
401 cp.rx_codec_frame_size = __cpu_to_le16(60);
402 cp.in_bandwidth = __cpu_to_le32(32000);
403 cp.out_bandwidth = __cpu_to_le32(32000);
404 cp.in_coding_format.id = 0x04;
405 cp.out_coding_format.id = 0x04;
406 cp.in_coded_data_size = __cpu_to_le16(16);
407 cp.out_coded_data_size = __cpu_to_le16(16);
408 cp.in_pcm_data_format = 2;
409 cp.out_pcm_data_format = 2;
410 cp.in_pcm_sample_payload_msb_pos = 0;
411 cp.out_pcm_sample_payload_msb_pos = 0;
412 cp.in_data_path = conn->codec.data_path;
413 cp.out_data_path = conn->codec.data_path;
414 cp.in_transport_unit_size = 1;
415 cp.out_transport_unit_size = 1;
416 break;
417
418 case BT_CODEC_TRANSPARENT:
419 if (!find_next_esco_param(conn, esco_param_msbc,
420 ARRAY_SIZE(esco_param_msbc)))
421 return false;
422 param = &esco_param_msbc[conn->attempt - 1];
423 cp.tx_coding_format.id = 0x03;
424 cp.rx_coding_format.id = 0x03;
425 cp.tx_codec_frame_size = __cpu_to_le16(60);
426 cp.rx_codec_frame_size = __cpu_to_le16(60);
427 cp.in_bandwidth = __cpu_to_le32(0x1f40);
428 cp.out_bandwidth = __cpu_to_le32(0x1f40);
429 cp.in_coding_format.id = 0x03;
430 cp.out_coding_format.id = 0x03;
431 cp.in_coded_data_size = __cpu_to_le16(16);
432 cp.out_coded_data_size = __cpu_to_le16(16);
433 cp.in_pcm_data_format = 2;
434 cp.out_pcm_data_format = 2;
435 cp.in_pcm_sample_payload_msb_pos = 0;
436 cp.out_pcm_sample_payload_msb_pos = 0;
437 cp.in_data_path = conn->codec.data_path;
438 cp.out_data_path = conn->codec.data_path;
439 cp.in_transport_unit_size = 1;
440 cp.out_transport_unit_size = 1;
441 break;
442
443 case BT_CODEC_CVSD:
444 if (lmp_esco_capable(conn->link)) {
445 if (!find_next_esco_param(conn, esco_param_cvsd,
446 ARRAY_SIZE(esco_param_cvsd)))
447 return -EINVAL;
448 param = &esco_param_cvsd[conn->attempt - 1];
449 } else {
450 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
451 return -EINVAL;
452 param = &sco_param_cvsd[conn->attempt - 1];
453 }
454 cp.tx_coding_format.id = 2;
455 cp.rx_coding_format.id = 2;
456 cp.tx_codec_frame_size = __cpu_to_le16(60);
457 cp.rx_codec_frame_size = __cpu_to_le16(60);
458 cp.in_bandwidth = __cpu_to_le32(16000);
459 cp.out_bandwidth = __cpu_to_le32(16000);
460 cp.in_coding_format.id = 4;
461 cp.out_coding_format.id = 4;
462 cp.in_coded_data_size = __cpu_to_le16(16);
463 cp.out_coded_data_size = __cpu_to_le16(16);
464 cp.in_pcm_data_format = 2;
465 cp.out_pcm_data_format = 2;
466 cp.in_pcm_sample_payload_msb_pos = 0;
467 cp.out_pcm_sample_payload_msb_pos = 0;
468 cp.in_data_path = conn->codec.data_path;
469 cp.out_data_path = conn->codec.data_path;
470 cp.in_transport_unit_size = 16;
471 cp.out_transport_unit_size = 16;
472 break;
473 default:
474 return -EINVAL;
475 }
476
477 cp.retrans_effort = param->retrans_effort;
478 cp.pkt_type = __cpu_to_le16(param->pkt_type);
479 cp.max_latency = __cpu_to_le16(param->max_latency);
480
481 if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
482 return -EIO;
483
484 return 0;
485 }
486
/* Send the legacy HCI Setup Synchronous Connection command for @handle,
 * picking parameters based on the connection's air mode.  Returns true if
 * the command was queued, false if no usable parameter set remains or the
 * send failed.
 */
static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		if (lmp_esco_capable(conn->link)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			/* Controller is not eSCO capable: fall back to SCO */
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
538
hci_setup_sync(struct hci_conn * conn,__u16 handle)539 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
540 {
541 int result;
542 struct conn_handle_t *conn_handle;
543
544 if (enhanced_sync_conn_capable(conn->hdev)) {
545 conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
546
547 if (!conn_handle)
548 return false;
549
550 conn_handle->conn = conn;
551 conn_handle->handle = handle;
552 result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
553 conn_handle, NULL);
554 if (result < 0)
555 kfree(conn_handle);
556
557 return result == 0;
558 }
559
560 return hci_setup_sync_conn(conn, handle);
561 }
562
/* Send HCI LE Connection Update with the given interval/latency/timeout and
 * mirror the values into the stored connection parameters, if any.
 * Returns 0x01 when stored parameters were updated, 0x00 otherwise.
 */
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}
598
/* Send HCI LE Start Encryption for @conn using the given EDIV/Rand pair
 * and the first @key_size bytes of @ltk (remaining key bytes stay zero).
 */
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size)
{
	struct hci_cp_le_start_enc cp = {
		.handle = cpu_to_le16(conn->handle),
		.rand = rand,
		.ediv = ediv,
	};

	BT_DBG("hcon %p", conn);

	memcpy(cp.ltk, ltk, key_size);

	hci_send_cmd(conn->hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
616
/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	if (!sco)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		/* ACL is up: start the pending (e)SCO setup on top of it */
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		/* ACL setup failed: fail and delete the linked SCO too */
		hci_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}
637
/* Delayed work run when a connection's disconnect timeout expires: abort
 * the connection unless it is still referenced.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	/* LE connections in scanning state need special handling */
	if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
	    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		hci_connect_le_scan_remove(conn);
		return;
	}

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}
667
/* Enter sniff mode */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	/* Both sides must support sniff mode */
	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	/* Only switch from active mode, and only if link policy allows it */
	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	/* Configure sniff subrating first when both sides support it */
	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
702
hci_conn_auto_accept(struct work_struct * work)703 static void hci_conn_auto_accept(struct work_struct *work)
704 {
705 struct hci_conn *conn = container_of(work, struct hci_conn,
706 auto_accept_work.work);
707
708 hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
709 &conn->dst);
710 }
711
le_disable_advertising(struct hci_dev * hdev)712 static void le_disable_advertising(struct hci_dev *hdev)
713 {
714 if (ext_adv_capable(hdev)) {
715 struct hci_cp_le_set_ext_adv_enable cp;
716
717 cp.enable = 0x00;
718 cp.num_of_sets = 0x00;
719
720 hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
721 &cp);
722 } else {
723 u8 enable = 0x00;
724 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
725 &enable);
726 }
727 }
728
/* Delayed work run when an LE connection attempt times out. */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
		hci_dev_lock(hdev);
		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		hci_dev_unlock(hdev);
		return;
	}

	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
753
/* Scratch state for walking ISO connections and building CIG commands.
 * The unions let the same structure serve both CIG/CIS (unicast) and
 * BIG/BIS (broadcast) lookups.
 */
struct iso_list_data {
	union {
		u8  cig;	/* CIG id being matched */
		u8  big;	/* BIG handle being matched */
	};
	union {
		u8  cis;	/* CIS id */
		u8  bis;	/* BIS id */
		u16 sync_handle;	/* PA sync handle (broadcast receiver) */
	};
	int count;	/* matches accumulated by walker callbacks */
	struct {
		struct hci_cp_le_set_cig_params cp;
		/* 0x11 CIS entries — presumably the supported maximum
		 * per CIG; TODO confirm against the HCI spec limit.
		 */
		struct hci_cis_params cis[0x11];
	} pdu;
};
770
/* hci_conn_hash walker: count connections matching the BIG/BIS in @data. */
static void bis_list(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Skip if not broadcast/ANY address */
	if (bacmp(&conn->dst, BDADDR_ANY))
		return;

	if (d->big != conn->iso_qos.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
	    d->bis != conn->iso_qos.bis)
		return;

	d->count++;
}
785
find_bis(struct hci_conn * conn,void * data)786 static void find_bis(struct hci_conn *conn, void *data)
787 {
788 struct iso_list_data *d = data;
789
790 /* Ignore unicast */
791 if (bacmp(&conn->dst, BDADDR_ANY))
792 return;
793
794 d->count++;
795 }
796
/* hci_cmd_sync callback: remove the broadcast advertising instance and
 * terminate the BIG if no BIS connections remain in it.
 */
static int terminate_big_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);

	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);

	/* Check if ISO connection is a BIS and terminate BIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
	if (d->count)
		return 0;

	return hci_le_terminate_big_sync(hdev, d->big,
					 HCI_ERROR_LOCAL_HOST_TERM);
}
815
/* hci_cmd_sync destroy callback: free the iso_list_data passed to
 * terminate_big_sync()/big_terminate_sync().
 */
static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}
820
/* Queue deferred termination of BIG @big / BIS @bis (broadcaster side).
 * Returns 0 on success or a negative error code.
 */
static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis);

	/* kzalloc() instead of kmalloc()+memset(): same zeroed result,
	 * simpler and less error-prone.
	 */
	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = big;
	d->bis = bis;

	/* terminate_big_destroy() frees @d once the queued work is done */
	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}
843
/* hci_cmd_sync callback (broadcast receiver side): terminate BIG sync and
 * the periodic advertising sync if no BIS connections remain.
 */
static int big_terminate_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
		   d->sync_handle);

	/* Check if ISO connection is a BIS and terminate BIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
	if (d->count)
		return 0;

	hci_le_big_terminate_sync(hdev, d->big);

	return hci_le_pa_terminate_sync(hdev, d->sync_handle);
}
862
/* Queue deferred termination of BIG @big and PA sync @sync_handle
 * (broadcast receiver side).  Returns 0 on success or a negative errno.
 */
static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle);

	/* kzalloc() instead of kmalloc()+memset(): same zeroed result,
	 * simpler and less error-prone.
	 */
	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = big;
	d->sync_handle = sync_handle;

	/* terminate_big_destroy() frees @d once the queued work is done */
	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}
885
/* Cleanup BIS connection
 *
 * Detects if there are any BIS left connected in a BIG
 * broadcaster: Remove advertising instance and terminate BIG.
 * broadcast receiver: Terminate BIG sync and terminate PA sync.
 */
static void bis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (conn->role == HCI_ROLE_MASTER) {
		/* Only clean up once per connection; the PER_ADV flag
		 * marks an active broadcast.
		 */
		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
			return;

		hci_le_terminate_big(hdev, conn->iso_qos.big,
				     conn->iso_qos.bis);
	} else {
		hci_le_big_terminate(hdev, conn->iso_qos.big,
				     conn->sync_handle);
	}
}
909
/* hci_cmd_sync callback: remove a CIG.  The CIG handle is smuggled through
 * the data pointer via ERR_PTR()/PTR_ERR() to avoid an allocation.
 */
static int remove_cig_sync(struct hci_dev *hdev, void *data)
{
	u8 handle = PTR_ERR(data);

	return hci_le_remove_cig_sync(hdev, handle);
}
916
/* Queue deferred removal of CIG @handle; see remove_cig_sync() for the
 * ERR_PTR() encoding of the handle.
 */
static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
{
	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);

	return hci_cmd_sync_queue(hdev, remove_cig_sync, ERR_PTR(handle), NULL);
}
923
find_cis(struct hci_conn * conn,void * data)924 static void find_cis(struct hci_conn *conn, void *data)
925 {
926 struct iso_list_data *d = data;
927
928 /* Ignore broadcast */
929 if (!bacmp(&conn->dst, BDADDR_ANY))
930 return;
931
932 d->count++;
933 }
934
/* Cleanup CIS connection:
 *
 * Detects if there are any CIS left connected in a CIG and remove it.
 */
static void cis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data d;

	memset(&d, 0, sizeof(d));
	d.cig = conn->iso_qos.cig;

	/* Check if ISO connection is a CIS and remove CIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
	if (d.count)
		return;

	hci_le_remove_cig(hdev, conn->iso_qos.cig);
}
956
/* Allocate and initialize a new hci_conn of @type to @dst with local @role,
 * add it to the connection hash and register its sysfs entry.  Takes a
 * reference on @hdev (released in hci_conn_cleanup()).  Returns the new
 * connection or NULL on allocation failure.
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->handle = HCI_CONN_HANDLE_UNSET;
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case ISO_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);

		/* set proper cleanup function */
		if (!bacmp(dst, BDADDR_ANY))
			conn->cleanup = bis_cleanup;
		else if (conn->role == HCI_ROLE_MASTER)
			conn->cleanup = cis_cleanup;

		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
	INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different to ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}
1053
/* Tear down a connection object: cancel its deferred work, return any
 * unacked buffer credits to the controller pools, detach the linked
 * SCO/ACL peer and finally remove it from the connection hash via
 * hci_conn_cleanup(). Always returns 0 (callers ignore the result).
 */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	/* Make sure no deferred work can run once the object is freed */
	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		/* Detach any SCO/eSCO riding on this ACL */
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		/* LE traffic uses the dedicated LE buffer pool when the
		 * controller has one, otherwise the shared ACL pool.
		 */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* SCO/eSCO/ISO: drop the reference held on the parent ACL */
		struct hci_conn *acl = conn->link;

		if (acl) {
			acl->link = NULL;
			hci_conn_drop(acl);
		}

		/* Unacked ISO frames */
		if (conn->type == ISO_LINK) {
			if (hdev->iso_pkts)
				hdev->iso_cnt += conn->sent;
			else if (hdev->le_pkts)
				hdev->le_cnt += conn->sent;
			else
				hdev->acl_cnt += conn->sent;
		}
	}

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	/* Drop any queued but untransmitted data */
	skb_queue_purge(&conn->data_q);

	/* Remove the connection from the list and cleanup its remaining
	 * state. This is a separate function since for some cases like
	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
	 * rest of hci_conn_del.
	 */
	hci_conn_cleanup(conn);

	return 0;
}
1111
/* Pick the HCI device to use for a connection from @src to @dst.
 *
 * When @src is a real address (not BDADDR_ANY) the device whose own
 * address (identity address for LE) matches @src/@src_type is chosen;
 * otherwise the first usable device whose address differs from @dst is
 * used. Only HCI_PRIMARY devices that are up and not claimed by a user
 * channel are considered.
 *
 * Returns a device with its reference count raised (caller must
 * hci_dev_put()) or NULL if no suitable device exists.
 */
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
		    d->dev_type != HCI_PRIMARY)
			continue;

		/* Simple routing:
		 * No source address - find interface with bdaddr != dst
		 * Source address - find interface with bdaddr == src
		 */

		if (use_src) {
			bdaddr_t id_addr;
			u8 id_addr_type;

			if (src_type == BDADDR_BREDR) {
				if (!lmp_bredr_capable(d))
					continue;
				bacpy(&id_addr, &d->bdaddr);
				id_addr_type = BDADDR_BREDR;
			} else {
				if (!lmp_le_capable(d))
					continue;

				hci_copy_identity_address(d, &id_addr,
							  &id_addr_type);

				/* Convert from HCI to three-value type */
				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
					id_addr_type = BDADDR_LE_PUBLIC;
				else
					id_addr_type = BDADDR_LE_RANDOM;
			}

			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	/* Take the reference while still holding the list lock so the
	 * device cannot go away between lookup and hold.
	 */
	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
1172
1173 /* This function requires the caller holds hdev->lock */
static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;

	/* Release the reference a pending auto-connection entry may be
	 * holding on this connection, since the attempt is over.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params && params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	/* If the status indicates successful cancellation of
	 * the attempt (i.e. Unknown Connection Id) there's no point of
	 * notifying failure since we'll go back to keep trying to
	 * connect. The only exception is explicit connect requests
	 * where a timeout + cancel does indicate an actual failure.
	 */
	if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
	    (params && params->explicit_connect))
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);

	/* Since we may have temporarily stopped the background scanning in
	 * favor of connection establishment, we should restart it.
	 */
	hci_update_passive_scan(hdev);

	/* Enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	hci_enable_advertising(hdev);
}
1208
1209 /* This function requires the caller holds hdev->lock */
/* Handle a failed connection attempt: run type-specific failure
 * processing, notify the upper protocol layers via hci_connect_cfm()
 * and free the object. @conn is no longer valid after this returns.
 * Link types other than LE/ACL get no mgmt notification, only the
 * close + delete.
 */
void hci_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	switch (conn->type) {
	case LE_LINK:
		hci_le_conn_failed(conn, status);
		break;
	case ACL_LINK:
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);
		break;
	}

	conn->state = BT_CLOSED;
	hci_connect_cfm(conn, status);
	hci_conn_del(conn);
}
1230
/* Completion callback for the queued LE Create Connection request.
 * On success the temporary scan/connect bookkeeping is torn down; on
 * failure the connection is failed out, but only if it is still the
 * pending LE connection (it may already have been replaced).
 */
static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	hci_dev_lock(hdev);

	if (err) {
		bt_dev_err(hdev, "request failed to create LE connection: err %d", err);

		/* Only fail the connection if it is still the pending one */
		if (conn == hci_lookup_le_connect(hdev))
			hci_conn_failed(conn, bt_status(err));
	} else {
		hci_connect_le_scan_cleanup(conn);
	}

	hci_dev_unlock(hdev);
}
1253
/* hci_cmd_sync_queue() callback: issue the actual LE Create Connection
 * request for the connection passed as @data.
 */
static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "conn %p", conn);
	return hci_le_create_conn_sync(hdev, conn);
}
1262
/* Initiate a direct LE connection to @dst/@dst_type.
 *
 * @dst_resolved: true when the controller already resolved @dst, in
 * which case the identity address is used as-is; otherwise a matching
 * IRK redirects the attempt to the peer's current RPA.
 *
 * Returns the (new or reused) connection with a reference held, or an
 * ERR_PTR: -ECONNREFUSED/-EOPNOTSUPP when LE is unavailable, -EBUSY
 * when another LE connect is in flight or the peer is already
 * connected, -ENOMEM on allocation failure.
 */
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, bool dst_resolved, u8 sec_level,
				u16 conn_timeout, u8 role)
{
	struct hci_conn *conn;
	struct smp_irk *irk;
	int err;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	if (hci_lookup_le_connect(hdev))
		return ERR_PTR(-EBUSY);

	/* If there's already a connection object but it's not in
	 * scanning state it means it must already be established, in
	 * which case we can't do anything else except report a failure
	 * to connect.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		return ERR_PTR(-EBUSY);
	}

	/* Check if the destination address has been resolved by the controller
	 * since if it did then the identity address shall be used.
	 */
	if (!dst_resolved) {
		/* When given an identity address with existing identity
		 * resolving key, the connection needs to be established
		 * to a resolvable random address.
		 *
		 * Storing the resolvable random address is required here
		 * to handle connection failures. The address will later
		 * be resolved back into the original identity address
		 * from the connect request.
		 */
		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
			dst = &irk->rpa;
			dst_type = ADDR_LE_DEV_RANDOM;
		}
	}

	/* Reuse a connection object that is still in scanning state;
	 * otherwise allocate a fresh one and take a reference on it.
	 */
	if (conn) {
		bacpy(&conn->dst, dst);
	} else {
		conn = hci_conn_add(hdev, LE_LINK, dst, role);
		if (!conn)
			return ERR_PTR(-ENOMEM);
		hci_conn_hold(conn);
		conn->pending_sec_level = sec_level;
	}

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->conn_timeout = conn_timeout;

	conn->state = BT_CONNECT;
	clear_bit(HCI_CONN_SCANNING, &conn->flags);

	/* Queue the actual Create Connection; create_le_conn_complete
	 * handles the asynchronous outcome.
	 */
	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn,
				 create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

	return conn;
}
1341
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)1342 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1343 {
1344 struct hci_conn *conn;
1345
1346 conn = hci_conn_hash_lookup_le(hdev, addr, type);
1347 if (!conn)
1348 return false;
1349
1350 if (conn->state != BT_CONNECTED)
1351 return false;
1352
1353 return true;
1354 }
1355
1356 /* This function requires the caller holds hdev->lock */
static int hci_explicit_conn_params_set(struct hci_dev *hdev,
					bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* Nothing to do if the peer is already connected */
	if (is_connected(hdev, addr, addr_type))
		return -EISCONN;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params) {
		params = hci_conn_params_add(hdev, addr, addr_type);
		if (!params)
			return -ENOMEM;

		/* If we created new params, mark them to be deleted in
		 * hci_connect_le_scan_cleanup. It's different case than
		 * existing disabled params, those will stay after cleanup.
		 */
		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
	}

	/* We're trying to connect, so make sure params are at pend_le_conns */
	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
		list_del_init(&params->action);
		list_add(&params->action, &hdev->pend_le_conns);
	}

	/* Flag the entry so the passive-scan machinery treats it as an
	 * explicit (user-initiated) connection attempt.
	 */
	params->explicit_connect = true;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       params->auto_connect);

	return 0;
}
1393
qos_set_big(struct hci_dev * hdev,struct bt_iso_qos * qos)1394 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1395 {
1396 struct iso_list_data data;
1397
1398 /* Allocate a BIG if not set */
1399 if (qos->big == BT_ISO_QOS_BIG_UNSET) {
1400 for (data.big = 0x00; data.big < 0xef; data.big++) {
1401 data.count = 0;
1402 data.bis = 0xff;
1403
1404 hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1405 BT_BOUND, &data);
1406 if (!data.count)
1407 break;
1408 }
1409
1410 if (data.big == 0xef)
1411 return -EADDRNOTAVAIL;
1412
1413 /* Update BIG */
1414 qos->big = data.big;
1415 }
1416
1417 return 0;
1418 }
1419
qos_set_bis(struct hci_dev * hdev,struct bt_iso_qos * qos)1420 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1421 {
1422 struct iso_list_data data;
1423
1424 /* Allocate BIS if not set */
1425 if (qos->bis == BT_ISO_QOS_BIS_UNSET) {
1426 /* Find an unused adv set to advertise BIS, skip instance 0x00
1427 * since it is reserved as general purpose set.
1428 */
1429 for (data.bis = 0x01; data.bis < hdev->le_num_of_adv_sets;
1430 data.bis++) {
1431 data.count = 0;
1432
1433 hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1434 BT_BOUND, &data);
1435 if (!data.count)
1436 break;
1437 }
1438
1439 if (data.bis == hdev->le_num_of_adv_sets)
1440 return -EADDRNOTAVAIL;
1441
1442 /* Update BIS */
1443 qos->bis = data.bis;
1444 }
1445
1446 return 0;
1447 }
1448
1449 /* This function requires the caller holds hdev->lock */
/* Allocate and register a new BIS connection towards @dst, reserving a
 * free BIG handle and advertising-set handle in @qos if unset.
 *
 * Returns the new connection (reference held, state BT_CONNECT) or an
 * ERR_PTR: -ECONNREFUSED/-EOPNOTSUPP when LE is unavailable,
 * -EADDRNOTAVAIL when no BIG/BIS handle is free, -EADDRINUSE when the
 * requested BIG/BIS pair is already taken, -ENOMEM on allocation
 * failure.
 */
static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
				    struct bt_iso_qos *qos)
{
	struct hci_conn *conn;
	struct iso_list_data data;
	int err;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);
		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Reserve BIG and BIS handles if the caller left them unset */
	err = qos_set_big(hdev, qos);
	if (err)
		return ERR_PTR(err);

	err = qos_set_bis(hdev, qos);
	if (err)
		return ERR_PTR(err);

	data.big = qos->big;
	data.bis = qos->bis;
	data.count = 0;

	/* Check if there is already a matching BIG/BIS */
	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, BT_BOUND, &data);
	if (data.count)
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_hash_lookup_bis(hdev, dst, qos->big, qos->bis);
	if (conn)
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	/* Broadcasts are carried on periodic advertising */
	set_bit(HCI_CONN_PER_ADV, &conn->flags);
	conn->state = BT_CONNECT;

	hci_conn_hold(conn);
	return conn;
}
1495
1496 /* This function requires the caller holds hdev->lock */
/* Initiate an LE connection to @dst by (passively) scanning for it and
 * connecting once it is seen, rather than issuing a direct Create
 * Connection. An existing connection object is reused, only raising
 * its pending security level.
 *
 * Returns the connection with a reference held or an ERR_PTR:
 * -ECONNREFUSED/-EOPNOTSUPP when LE is unavailable, -ENOMEM on
 * allocation failure, -EBUSY when the connection parameters could not
 * be registered.
 */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason)
{
	struct hci_conn *conn;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn) {
		if (conn->pending_sec_level < sec_level)
			conn->pending_sec_level = sec_level;
		goto done;
	}

	BT_DBG("requesting refresh of dst_addr");

	conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	/* Register connection parameters so the peer ends up on the
	 * pend_le_conns list the scan machinery connects from.
	 */
	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
		hci_conn_del(conn);
		return ERR_PTR(-EBUSY);
	}

	conn->state = BT_CONNECT;
	set_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;
	conn->conn_reason = conn_reason;

	/* Kick the passive scan so the new pending entry is picked up */
	hci_update_passive_scan(hdev);

done:
	hci_conn_hold(conn);
	return conn;
}
1553
/* Get an ACL connection to @dst, creating one if none exists yet.
 * When the connection is not already being set up (state BT_OPEN or
 * BT_CLOSED) the security parameters are stored and the HCI Create
 * Connection procedure is started. Returns the connection with a
 * reference held, or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type,
				 enum conn_reasons conn_reason)
{
	struct hci_conn *acl;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return lmp_bredr_capable(hdev) ? ERR_PTR(-ECONNREFUSED) :
						 ERR_PTR(-EOPNOTSUPP);

	/* Reuse an existing ACL to this peer when possible */
	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);
	acl->conn_reason = conn_reason;

	/* Only start a new connection when none is already in progress */
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
1586
/* Set up a SCO/eSCO connection (@type) to @dst on top of an ACL:
 * ensure the ACL exists (creating and connecting it if needed), create
 * the synchronous connection object, cross-link the two and - once the
 * ACL is connected - start the synchronous setup, possibly deferred
 * until a pending mode change completes.
 *
 * Returns the SCO connection with a reference held or an ERR_PTR
 * propagated from hci_connect_acl()/-ENOMEM.
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting, struct bt_codec *codec)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
			      CONN_REASON_SCO_CONNECT);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			/* Undo the reference taken by hci_connect_acl() */
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* Cross-link ACL and SCO so either side can find the other */
	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	sco->setting = setting;
	sco->codec = *codec;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		/* Keep the ACL in active mode during SCO setup */
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
1631
cis_add(struct iso_list_data * d,struct bt_iso_qos * qos)1632 static void cis_add(struct iso_list_data *d, struct bt_iso_qos *qos)
1633 {
1634 struct hci_cis_params *cis = &d->pdu.cis[d->pdu.cp.num_cis];
1635
1636 cis->cis_id = qos->cis;
1637 cis->c_sdu = cpu_to_le16(qos->out.sdu);
1638 cis->p_sdu = cpu_to_le16(qos->in.sdu);
1639 cis->c_phy = qos->out.phy ? qos->out.phy : qos->in.phy;
1640 cis->p_phy = qos->in.phy ? qos->in.phy : qos->out.phy;
1641 cis->c_rtn = qos->out.rtn;
1642 cis->p_rtn = qos->in.rtn;
1643
1644 d->pdu.cp.num_cis++;
1645 }
1646
/* hci_conn_hash_list_state() callback: count connected/bound CIS
 * connections matching the CIG and CIS ids in @data, collecting their
 * QoS into the pending Set CIG Parameters PDU.
 */
static void cis_list(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Skip if broadcast/ANY address */
	if (!bacmp(&conn->dst, BDADDR_ANY))
		return;

	/* Only an exact CIG+CIS match counts.
	 * NOTE(review): a lookup with d->cis == BT_ISO_QOS_CIS_UNSET can
	 * never match here - confirm callers that probe whole CIGs rely
	 * on a concrete d->cis value.
	 */
	if (d->cig != conn->iso_qos.cig || d->cis == BT_ISO_QOS_CIS_UNSET ||
	    d->cis != conn->iso_qos.cis)
		return;

	d->count++;

	/* Only collect PDU entries once a concrete CIG id has been set
	 * in the PDU, and while there is room left for more entries.
	 */
	if (d->pdu.cp.cig_id == BT_ISO_QOS_CIG_UNSET ||
	    d->count >= ARRAY_SIZE(d->pdu.cis))
		return;

	cis_add(d, &conn->iso_qos);
}
1667
/* Queue the HCI LE Create BIG command for @conn, carrying a single BIS
 * whose parameters come from the output direction of @qos. Encryption
 * is disabled and the broadcast code left zeroed. Returns the result
 * of hci_send_cmd() (0 or negative errno).
 */
static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_big cp;

	memset(&cp, 0, sizeof(cp));

	cp.handle = qos->big;
	cp.adv_handle = qos->bis;
	cp.num_bis  = 0x01;
	hci_cpu_to_le24(qos->out.interval, cp.bis.sdu_interval);
	cp.bis.sdu = cpu_to_le16(qos->out.sdu);
	cp.bis.latency =  cpu_to_le16(qos->out.latency);
	cp.bis.rtn  = qos->out.rtn;
	cp.bis.phy  = qos->out.phy;
	cp.bis.packing = qos->packing;
	cp.bis.framing = qos->framing;
	cp.bis.encryption = 0x00;	/* unencrypted broadcast */
	memset(&cp.bis.bcode, 0, sizeof(cp.bis.bcode));

	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
}
1690
/* Program the CIG for @conn: allocate a CIG id if @qos has none,
 * gather every CIS belonging to that CIG into a single Set CIG
 * Parameters PDU (the controller requires them to be reprogrammed
 * together), allocate a CIS id if needed, and send the command.
 * Returns true on success, false when no id could be allocated, the
 * requested CIS already exists, or the HCI command could not be sent.
 */
static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data data;

	memset(&data, 0, sizeof(data));

	/* Allocate a CIG if not set */
	if (qos->cig == BT_ISO_QOS_CIG_UNSET) {
		for (data.cig = 0x00; data.cig < 0xff; data.cig++) {
			data.count = 0;
			data.cis = 0xff;

			/* A CIG id is free only if no CIS uses it in
			 * either bound or connected state.
			 */
			hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
						 BT_BOUND, &data);
			if (data.count)
				continue;

			hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
						 BT_CONNECTED, &data);
			if (!data.count)
				break;
		}

		if (data.cig == 0xff)
			return false;

		/* Update CIG */
		qos->cig = data.cig;
	}

	/* Fill the PDU header with the CIG-wide parameters */
	data.pdu.cp.cig_id = qos->cig;
	hci_cpu_to_le24(qos->out.interval, data.pdu.cp.c_interval);
	hci_cpu_to_le24(qos->in.interval, data.pdu.cp.p_interval);
	data.pdu.cp.sca = qos->sca;
	data.pdu.cp.packing = qos->packing;
	data.pdu.cp.framing = qos->framing;
	data.pdu.cp.c_latency = cpu_to_le16(qos->out.latency);
	data.pdu.cp.p_latency = cpu_to_le16(qos->in.latency);

	/* When the caller requested a specific CIS id, it must not be
	 * in use yet; add it as the first PDU entry.
	 */
	if (qos->cis != BT_ISO_QOS_CIS_UNSET) {
		data.count = 0;
		data.cig = qos->cig;
		data.cis = qos->cis;

		hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
					 &data);
		if (data.count)
			return false;

		cis_add(&data, qos);
	}

	/* Reprogram all CIS(s) with the same CIG */
	for (data.cig = qos->cig, data.cis = 0x00; data.cis < 0x11;
	     data.cis++) {
		data.count = 0;

		/* cis_list() collects the existing entries as a side
		 * effect of counting them.
		 */
		hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
					 &data);
		if (data.count)
			continue;

		/* Allocate a CIS if not set */
		if (qos->cis == BT_ISO_QOS_CIS_UNSET) {
			/* Update CIS */
			qos->cis = data.cis;
			cis_add(&data, qos);
		}
	}

	if (qos->cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis)
		return false;

	if (hci_send_cmd(hdev, HCI_OP_LE_SET_CIG_PARAMS,
			 sizeof(data.pdu.cp) +
			 (data.pdu.cp.num_cis * sizeof(*data.pdu.cis)),
			 &data.pdu) < 0)
		return false;

	return true;
}
1773
/* Bind a CIS connection towards @dst to the QoS in @qos: reuse an
 * existing CIS object when present, default unset interval/latency
 * values from the opposite direction, program the CIG via
 * hci_le_set_cig_params() and move the connection to BT_BOUND.
 *
 * Returns the CIS (possibly already connected or already bound with
 * identical QoS) or an ERR_PTR (-ENOMEM, -EINVAL).
 */
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
			      __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *cis;

	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
		if (!cis)
			return ERR_PTR(-ENOMEM);
		cis->cleanup = cis_cleanup;
		cis->dst_type = dst_type;
	}

	if (cis->state == BT_CONNECTED)
		return cis;

	/* Check if CIS has been set and the settings matches */
	if (cis->state == BT_BOUND &&
	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
		return cis;

	/* Update LINK PHYs according to QoS preference */
	cis->le_tx_phy = qos->out.phy;
	cis->le_rx_phy = qos->in.phy;

	/* If output interval is not set use the input interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->out.interval)
		qos->out.interval = qos->in.interval;

	/* If input interval is not set use the output interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->in.interval)
		qos->in.interval = qos->out.interval;

	/* If output latency is not set use the input latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->out.latency)
		qos->out.latency = qos->in.latency;

	/* If input latency is not set use the output latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->in.latency)
		qos->in.latency = qos->out.latency;

	if (!hci_le_set_cig_params(cis, qos)) {
		hci_conn_drop(cis);
		return ERR_PTR(-EINVAL);
	}

	cis->iso_qos = *qos;
	cis->state = BT_BOUND;

	return cis;
}
1834
/* Configure the HCI ISO data path(s) for @conn: an input path (host to
 * controller) when a TX SDU size is configured, and an output path
 * (controller to host) when an RX SDU size is configured. Both use the
 * transparent codec over the HCI transport. Returns false when queuing
 * either Setup ISO Data Path command fails.
 */
bool hci_iso_setup_path(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_setup_iso_path cmd;

	memset(&cmd, 0, sizeof(cmd));

	if (conn->iso_qos.out.sdu) {
		cmd.handle = cpu_to_le16(conn->handle);
		cmd.direction = 0x00; /* Input (Host to Controller) */
		cmd.path = 0x00; /* HCI path if enabled */
		cmd.codec = 0x03; /* Transparent Data */

		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
				 &cmd) < 0)
			return false;
	}

	if (conn->iso_qos.in.sdu) {
		cmd.handle = cpu_to_le16(conn->handle);
		cmd.direction = 0x01; /* Output (Controller to Host) */
		cmd.path = 0x00; /* HCI path if enabled */
		cmd.codec = 0x03; /* Transparent Data */

		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
				 &cmd) < 0)
			return false;
	}

	return true;
}
1866
hci_create_cis_sync(struct hci_dev * hdev,void * data)1867 static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
1868 {
1869 struct {
1870 struct hci_cp_le_create_cis cp;
1871 struct hci_cis cis[0x1f];
1872 } cmd;
1873 struct hci_conn *conn = data;
1874 u8 cig;
1875
1876 memset(&cmd, 0, sizeof(cmd));
1877 cmd.cis[0].acl_handle = cpu_to_le16(conn->link->handle);
1878 cmd.cis[0].cis_handle = cpu_to_le16(conn->handle);
1879 cmd.cp.num_cis++;
1880 cig = conn->iso_qos.cig;
1881
1882 hci_dev_lock(hdev);
1883
1884 rcu_read_lock();
1885
1886 list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
1887 struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis];
1888
1889 if (conn == data || conn->type != ISO_LINK ||
1890 conn->state == BT_CONNECTED || conn->iso_qos.cig != cig)
1891 continue;
1892
1893 /* Check if all CIS(s) belonging to a CIG are ready */
1894 if (!conn->link || conn->link->state != BT_CONNECTED ||
1895 conn->state != BT_CONNECT) {
1896 cmd.cp.num_cis = 0;
1897 break;
1898 }
1899
1900 /* Group all CIS with state BT_CONNECT since the spec don't
1901 * allow to send them individually:
1902 *
1903 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
1904 * page 2566:
1905 *
1906 * If the Host issues this command before all the
1907 * HCI_LE_CIS_Established events from the previous use of the
1908 * command have been generated, the Controller shall return the
1909 * error code Command Disallowed (0x0C).
1910 */
1911 cis->acl_handle = cpu_to_le16(conn->link->handle);
1912 cis->cis_handle = cpu_to_le16(conn->handle);
1913 cmd.cp.num_cis++;
1914 }
1915
1916 rcu_read_unlock();
1917
1918 hci_dev_unlock(hdev);
1919
1920 if (!cmd.cp.num_cis)
1921 return 0;
1922
1923 return hci_send_cmd(hdev, HCI_OP_LE_CREATE_CIS, sizeof(cmd.cp) +
1924 sizeof(cmd.cis[0]) * cmd.cp.num_cis, &cmd);
1925 }
1926
/* Queue the Create CIS procedure for @conn. For an LE (ACL) link the
 * linked CIS is used; for an ISO link the connection itself is the
 * CIS. Returns 0 when queued (or already in progress), -EINVAL for
 * other link types or an LE link without a connected CIS, or the
 * hci_cmd_sync_queue() error.
 */
int hci_le_create_cis(struct hci_conn *conn)
{
	struct hci_conn *cis;
	struct hci_dev *hdev = conn->hdev;
	int err;

	switch (conn->type) {
	case LE_LINK:
		if (!conn->link || conn->state != BT_CONNECTED)
			return -EINVAL;
		cis = conn->link;
		break;
	case ISO_LINK:
		cis = conn;
		break;
	default:
		return -EINVAL;
	}

	/* Already queued/in progress - nothing to do */
	if (cis->state == BT_CONNECT)
		return 0;

	/* Queue Create CIS */
	err = hci_cmd_sync_queue(hdev, hci_create_cis_sync, cis, NULL);
	if (err)
		return err;

	cis->state = BT_CONNECT;

	return 0;
}
1958
/* Fill in defaults for one direction of an ISO QoS (@qos): pick an SDU
 * size from the controller MTUs when unset, substitute @phy for the
 * BT_ISO_PHY_ANY wildcard, and derive interval/latency from the LE ACL
 * connection when unset. Note the SDU default is decided from the
 * original qos->phy value, before the wildcard substitution.
 */
static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
			      struct bt_iso_io_qos *qos, __u8 phy)
{
	/* Only set MTU if PHY is enabled */
	if (!qos->sdu && qos->phy) {
		/* Prefer the ISO pool, then LE, then ACL */
		if (hdev->iso_mtu > 0)
			qos->sdu = hdev->iso_mtu;
		else if (hdev->le_mtu > 0)
			qos->sdu = hdev->le_mtu;
		else
			qos->sdu = hdev->acl_mtu;
	}

	/* Use the same PHY as ACL if set to any */
	if (qos->phy == BT_ISO_PHY_ANY)
		qos->phy = phy;

	/* Use LE ACL connection interval if not set */
	if (!qos->interval)
		/* ACL interval unit in 1.25 ms to us */
		qos->interval = conn->le_conn_interval * 1250;

	/* Use LE ACL connection latency if not set */
	if (!qos->latency)
		qos->latency = conn->le_conn_latency;
}
1985
hci_bind_bis(struct hci_conn * conn,struct bt_iso_qos * qos)1986 static struct hci_conn *hci_bind_bis(struct hci_conn *conn,
1987 struct bt_iso_qos *qos)
1988 {
1989 /* Update LINK PHYs according to QoS preference */
1990 conn->le_tx_phy = qos->out.phy;
1991 conn->le_tx_phy = qos->out.phy;
1992 conn->iso_qos = *qos;
1993 conn->state = BT_BOUND;
1994
1995 return conn;
1996 }
1997
/* hci_cmd_sync_queue() callback: start the periodic advertising train
 * carrying the BIS data (BASE), then issue LE Create BIG with the
 * connection's QoS. Returns 0 or a negative errno.
 */
static int create_big_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	u16 interval, sync_interval = 0;
	u32 flags = 0;
	int err;

	if (qos->out.phy == 0x02)
		flags |= MGMT_ADV_FLAG_SEC_2M;

	/* Align intervals */
	/* SDU interval is in us; adv interval unit is 0.625 ms, so this
	 * is interval/2 expressed in 0.625 ms units.
	 */
	interval = qos->out.interval / 1250;

	/* NOTE(review): sync_interval scaling by 1600 assumes a
	 * seconds-based qos->sync_interval - confirm against the ISO
	 * socket QoS definition.
	 */
	if (qos->bis)
		sync_interval = qos->sync_interval * 1600;

	err = hci_start_per_adv_sync(hdev, qos->bis, conn->le_per_adv_data_len,
				     conn->le_per_adv_data, flags, interval,
				     interval, sync_interval);
	if (err)
		return err;

	return hci_le_create_big(conn, &conn->iso_qos);
}
2023
/* Completion callback for the queued PA Create Sync request: log any
 * failure and release the command parameters allocated by
 * hci_pa_create_sync().
 */
static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
	bt_dev_dbg(hdev, "");

	if (err)
		bt_dev_err(hdev, "Unable to create PA: %d", err);

	/* data is the heap-allocated hci_cp_le_pa_create_sync */
	kfree(data);
}
2035
create_pa_sync(struct hci_dev * hdev,void * data)2036 static int create_pa_sync(struct hci_dev *hdev, void *data)
2037 {
2038 struct hci_cp_le_pa_create_sync *cp = data;
2039 int err;
2040
2041 err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
2042 sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2043 if (err) {
2044 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2045 return err;
2046 }
2047
2048 return hci_update_passive_scan_sync(hdev);
2049 }
2050
/* Start synchronizing to the periodic advertising train of @dst with
 * advertising SID @sid. Only one PA sync procedure may run at a time
 * (guarded by the HCI_PA_SYNC flag). The command parameters are heap
 * allocated and freed by create_pa_complete(). Returns 0 when queued,
 * -EBUSY when a sync is already in progress, -ENOMEM on allocation
 * failure.
 *
 * Idiom fix: use kzalloc() instead of kmalloc() + memset().
 */
int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
		       __u8 sid)
{
	struct hci_cp_le_pa_create_sync *cp;

	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
		return -EBUSY;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp) {
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		return -ENOMEM;
	}

	/* Convert from ISO socket address type to HCI address type */
	if (dst_type == BDADDR_LE_PUBLIC)
		dst_type = ADDR_LE_DEV_PUBLIC;
	else
		dst_type = ADDR_LE_DEV_RANDOM;

	cp->sid = sid;
	cp->addr_type = dst_type;
	bacpy(&cp->addr, dst);

	/* Queue start pa_create_sync and scan */
	return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
}
2079
/* Send LE BIG Create Sync to synchronize to up to @num_bis BISes
 * (indices in @bis) of the periodic advertising train identified by
 * @sync_handle, allocating a BIG handle in @qos if not already set.
 * Returns 0 on success or a negative errno.
 *
 * Fix: the parameter block goes on the wire, so it must carry the
 * __packed attribute; the original merely named the struct tag
 * "_packed", which has no effect on layout.
 */
int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
{
	struct {
		struct hci_cp_le_big_create_sync cp;
		__u8  bis[0x11];
	} __packed pdu;
	int err;

	if (num_bis > sizeof(pdu.bis))
		return -EINVAL;

	err = qos_set_big(hdev, qos);
	if (err)
		return err;

	memset(&pdu, 0, sizeof(pdu));
	pdu.cp.handle = qos->big;
	pdu.cp.sync_handle = cpu_to_le16(sync_handle);
	pdu.cp.num_bis = num_bis;
	memcpy(pdu.bis, bis, num_bis);

	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
			    sizeof(pdu.cp) + num_bis, &pdu);
}
2105
/* Completion callback for the queued Create BIG request: on failure
 * notify the upper layers and free the connection; on success there is
 * nothing to do here (the BIG Complete event drives the rest).
 */
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (!err)
		return;

	bt_dev_err(hdev, "Unable to create BIG: %d", err);
	hci_connect_cfm(conn, err);
	hci_conn_del(conn);
}
2118
/* Create a broadcast (BIS) connection: allocate and bind the BIS
 * object, embed the Broadcast Audio Announcement (BASE) in the
 * periodic advertising data when supplied, queue the periodic
 * advertising + Create BIG procedure and fill in the TX QoS defaults.
 * Returns the connection or an ERR_PTR.
 */
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos,
				 __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	int err;

	/* We need hci_conn object using the BDADDR_ANY as dst */
	conn = hci_add_bis(hdev, dst, qos);
	if (IS_ERR(conn))
		return conn;

	conn = hci_bind_bis(conn, qos);
	/* NOTE(review): hci_bind_bis() currently always returns its
	 * argument, so this branch looks unreachable - kept defensively.
	 */
	if (!conn) {
		hci_conn_drop(conn);
		return ERR_PTR(-ENOMEM);
	}

	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
	if (base_len && base) {
		/* 0x1851: Broadcast Audio Announcement Service UUID */
		base_len = eir_append_service_data(conn->le_per_adv_data, 0,
						   0x1851, base, base_len);
		conn->le_per_adv_data_len = base_len;
	}

	/* Queue start periodic advertising and create BIG */
	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
				 create_big_complete);
	if (err < 0) {
		hci_conn_drop(conn);
		return ERR_PTR(err);
	}

	hci_iso_qos_setup(hdev, conn, &qos->out,
			  conn->le_tx_phy ? conn->le_tx_phy :
			  hdev->le_tx_def_phys);

	return conn;
}
2158
/* Create a CIS (Connected Isochronous Stream) to @dst on top of an LE
 * link, creating or reusing the LE connection as needed.
 *
 * Returns the CIS hci_conn (with a reference held) or an ERR_PTR.
 */
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *le;
	struct hci_conn *cis;

	/* While advertising we act as peripheral and wait for the central
	 * to connect; otherwise initiate the LE connection via scanning.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		le = hci_connect_le(hdev, dst, dst_type, false,
				    BT_SECURITY_LOW,
				    HCI_LE_CONN_TIMEOUT,
				    HCI_ROLE_SLAVE);
	else
		le = hci_connect_le_scan(hdev, dst, dst_type,
					 BT_SECURITY_LOW,
					 HCI_LE_CONN_TIMEOUT,
					 CONN_REASON_ISO_CONNECT);
	if (IS_ERR(le))
		return le;

	/* Fall back to the controller default PHYs when the link has not
	 * negotiated any yet.
	 */
	hci_iso_qos_setup(hdev, le, &qos->out,
			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
	hci_iso_qos_setup(hdev, le, &qos->in,
			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);

	cis = hci_bind_cis(hdev, dst, dst_type, qos);
	if (IS_ERR(cis)) {
		hci_conn_drop(le);
		return cis;
	}

	/* Cross-link the LE (ACL) and CIS connections */
	le->link = cis;
	cis->link = le;

	/* Reference returned to the caller */
	hci_conn_hold(cis);

	/* If LE is already connected and CIS handle is already set proceed to
	 * Create CIS immediately.
	 */
	if (le->state == BT_CONNECTED && cis->handle != HCI_CONN_HANDLE_UNSET)
		hci_le_create_cis(le);

	return cis;
}
2202
2203 /* Check link security requirement */
hci_conn_check_link_mode(struct hci_conn * conn)2204 int hci_conn_check_link_mode(struct hci_conn *conn)
2205 {
2206 BT_DBG("hcon %p", conn);
2207
2208 /* In Secure Connections Only mode, it is required that Secure
2209 * Connections is used and the link is encrypted with AES-CCM
2210 * using a P-256 authenticated combination key.
2211 */
2212 if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2213 if (!hci_conn_sc_enabled(conn) ||
2214 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2215 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2216 return 0;
2217 }
2218
2219 /* AES encryption is required for Level 4:
2220 *
2221 * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2222 * page 1319:
2223 *
2224 * 128-bit equivalent strength for link and encryption keys
2225 * required using FIPS approved algorithms (E0 not allowed,
2226 * SAFER+ not allowed, and P-192 not allowed; encryption key
2227 * not shortened)
2228 */
2229 if (conn->sec_level == BT_SECURITY_FIPS &&
2230 !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2231 bt_dev_err(conn->hdev,
2232 "Invalid security: Missing AES-CCM usage");
2233 return 0;
2234 }
2235
2236 if (hci_conn_ssp_enabled(conn) &&
2237 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2238 return 0;
2239
2240 return 1;
2241 }
2242
2243 /* Authenticate remote device */
/* Authenticate remote device.  Returns 1 when the link is already
 * authenticated at a sufficient level, 0 when authentication has been
 * requested (or is already pending) and the caller must wait.
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	struct hci_cp_auth_requested cp;

	BT_DBG("hcon %p", conn);

	/* Never request less than an already-pending security upgrade */
	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement*/
	auth_type |= (conn->auth_type & 0x01);
	conn->auth_type = auth_type;

	/* Nothing more to do while a request is already in flight */
	if (test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags))
		return 0;

	cp.handle = cpu_to_le16(conn->handle);
	hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);

	/* If we're already encrypted set the REAUTH_PEND flag,
	 * otherwise set the ENCRYPT_PEND.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
	else
		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	return 0;
}
2279
2280 /* Encrypt the link */
hci_conn_encrypt(struct hci_conn * conn)2281 static void hci_conn_encrypt(struct hci_conn *conn)
2282 {
2283 BT_DBG("hcon %p", conn);
2284
2285 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2286 struct hci_cp_set_conn_encrypt cp;
2287 cp.handle = cpu_to_le16(conn->handle);
2288 cp.encrypt = 0x01;
2289 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2290 &cp);
2291 }
2292 }
2293
2294 /* Enable security */
/* Enable security: make sure the link of @conn meets @sec_level,
 * triggering authentication and/or encryption as needed.
 *
 * Returns 1 when all requirements are already met, 0 when a security
 * procedure has been started (or is pending) and the caller must wait
 * for its completion.
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	/* LE links use SMP for pairing and encryption */
	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

	/* Current key is not strong enough: (re-)authenticate */
auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
2370 EXPORT_SYMBOL(hci_conn_security);
2371
2372 /* Check secure link requirement */
/* Check secure link requirement: returns 1 when @sec_level does not
 * demand a secure link, or when the connection already provides one.
 */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
	bool want_secure, have_secure;

	BT_DBG("hcon %p", conn);

	want_secure = sec_level == BT_SECURITY_HIGH ||
		      sec_level == BT_SECURITY_FIPS;
	have_secure = conn->sec_level == BT_SECURITY_HIGH ||
		      conn->sec_level == BT_SECURITY_FIPS;

	return (!want_secure || have_secure) ? 1 : 0;
}
2389 EXPORT_SYMBOL(hci_conn_check_secure);
2390
2391 /* Switch role */
/* Switch role: request a master/slave role switch unless one is already
 * pending.  Returns 1 when the connection already has the wanted role,
 * 0 otherwise.
 */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	struct hci_cp_switch_role cp;

	BT_DBG("hcon %p", conn);

	if (role == conn->role)
		return 1;

	if (test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags))
		return 0;

	bacpy(&cp.bdaddr, &conn->dst);
	cp.role = role;
	hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);

	return 0;
}
2408 EXPORT_SYMBOL(hci_conn_switch_role);
2409
2410 /* Enter active mode */
/* Enter active mode: leave sniff mode if required and (re)arm the idle
 * timer of the connection.
 */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	/* Exit sniff only when sniffing, when requested (forced or power
	 * save enabled) and when no mode change is already in flight.
	 */
	if (conn->mode == HCI_CM_SNIFF &&
	    (force_active || test_bit(HCI_CONN_POWER_SAVE, &conn->flags)) &&
	    !test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}
2434
2435 /* Drop all connection on the device */
hci_conn_hash_flush(struct hci_dev * hdev)2436 void hci_conn_hash_flush(struct hci_dev *hdev)
2437 {
2438 struct hci_conn_hash *h = &hdev->conn_hash;
2439 struct hci_conn *c, *n;
2440
2441 BT_DBG("hdev %s", hdev->name);
2442
2443 list_for_each_entry_safe(c, n, &h->list, list) {
2444 c->state = BT_CLOSED;
2445
2446 hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
2447 hci_conn_del(c);
2448 }
2449 }
2450
2451 /* Check pending connect attempts */
hci_conn_check_pending(struct hci_dev * hdev)2452 void hci_conn_check_pending(struct hci_dev *hdev)
2453 {
2454 struct hci_conn *conn;
2455
2456 BT_DBG("hdev %s", hdev->name);
2457
2458 hci_dev_lock(hdev);
2459
2460 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
2461 if (conn)
2462 hci_acl_create_connection(conn);
2463
2464 hci_dev_unlock(hdev);
2465 }
2466
get_link_mode(struct hci_conn * conn)2467 static u32 get_link_mode(struct hci_conn *conn)
2468 {
2469 u32 link_mode = 0;
2470
2471 if (conn->role == HCI_ROLE_MASTER)
2472 link_mode |= HCI_LM_MASTER;
2473
2474 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2475 link_mode |= HCI_LM_ENCRYPT;
2476
2477 if (test_bit(HCI_CONN_AUTH, &conn->flags))
2478 link_mode |= HCI_LM_AUTH;
2479
2480 if (test_bit(HCI_CONN_SECURE, &conn->flags))
2481 link_mode |= HCI_LM_SECURE;
2482
2483 if (test_bit(HCI_CONN_FIPS, &conn->flags))
2484 link_mode |= HCI_LM_FIPS;
2485
2486 return link_mode;
2487 }
2488
/* HCIGETCONNLIST ioctl backend: copy up to req.conn_num connection info
 * records of the requested device to userspace.
 */
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct hci_conn *c;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	/* Bound the allocation driven by the userspace-supplied count */
	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&ci[n].bdaddr, &c->dst);
		ci[n].handle = c->handle;
		ci[n].type = c->type;
		ci[n].out = c->out;
		ci[n].state = c->state;
		ci[n].link_mode = get_link_mode(c);
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	/* Only copy back the records actually filled in */
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}
2541
/* HCIGETCONNINFO ioctl backend: look up the connection matching the
 * request and copy its info record back to userspace.
 */
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	/* Zero the whole struct so that no uninitialized stack bytes
	 * (e.g. compiler-inserted padding) can leak to userspace through
	 * the copy_to_user() below.
	 */
	memset(&ci, 0, sizeof(ci));

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type = conn->type;
		ci.out = conn->out;
		ci.state = conn->state;
		ci.link_mode = get_link_mode(conn);
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}
2569
/* HCIGETAUTHINFO ioctl backend: report the auth_type of the ACL
 * connection to the requested address.
 */
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;
	bool found = false;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn) {
		req.type = conn->auth_type;
		found = true;
	}
	hci_dev_unlock(hdev);

	if (!found)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
2589
hci_chan_create(struct hci_conn * conn)2590 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2591 {
2592 struct hci_dev *hdev = conn->hdev;
2593 struct hci_chan *chan;
2594
2595 BT_DBG("%s hcon %p", hdev->name, conn);
2596
2597 if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2598 BT_DBG("Refusing to create new hci_chan");
2599 return NULL;
2600 }
2601
2602 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2603 if (!chan)
2604 return NULL;
2605
2606 chan->conn = hci_conn_get(conn);
2607 skb_queue_head_init(&chan->data_q);
2608 chan->state = BT_CONNECTED;
2609
2610 list_add_rcu(&chan->list, &conn->chan_list);
2611
2612 return chan;
2613 }
2614
/* Remove @chan from its connection's channel list and free it, dropping
 * the connection reference taken in hci_chan_create().
 */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	/* Wait for concurrent RCU readers still traversing chan_list
	 * before the channel is freed below.
	 */
	synchronize_rcu();

	/* Prevent new hci_chan's to be created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
2634
hci_chan_list_flush(struct hci_conn * conn)2635 void hci_chan_list_flush(struct hci_conn *conn)
2636 {
2637 struct hci_chan *chan, *n;
2638
2639 BT_DBG("hcon %p", conn);
2640
2641 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2642 hci_chan_del(chan);
2643 }
2644
/* Find the channel with @handle on @hcon, or NULL.  Called under
 * rcu_read_lock() by hci_chan_lookup_handle().
 */
static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
						 __u16 handle)
{
	struct hci_chan *ch;

	list_for_each_entry(ch, &hcon->chan_list, list)
		if (ch->handle == handle)
			return ch;

	return NULL;
}
2657
/* Search every connection of @hdev for the channel with @handle.
 * Returns the channel or NULL when no connection owns it.
 */
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
	struct hci_chan *found = NULL;
	struct hci_conn *hcon;

	rcu_read_lock();

	list_for_each_entry_rcu(hcon, &hdev->conn_hash.list, list) {
		found = __hci_chan_lookup_handle(hcon, handle);
		if (found)
			break;
	}

	rcu_read_unlock();

	return found;
}
2676
/* Report the PHYs potentially in use by @conn as a BT_PHY_* bitmask,
 * derived from the connection type and its packet-type/PHY bits.
 *
 * NOTE: for the EDR packet types below, the pkt_type bits exclude the
 * corresponding packets rather than enable them, hence the negated
 * tests (see the cited spec table).
 */
u32 hci_conn_get_phy(struct hci_conn *conn)
{
	u32 phys = 0;

	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
	 * Table 6.2: Packets defined for synchronous, asynchronous, and
	 * CPB logical transport types.
	 */
	switch (conn->type) {
	case SCO_LINK:
		/* SCO logical transport (1 Mb/s):
		 * HV1, HV2, HV3 and DV.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		break;

	case ACL_LINK:
		/* ACL logical transport (1 Mb/s) ptt=0:
		 * DH1, DM3, DH3, DM5 and DH5.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
			phys |= BT_PHY_BR_1M_3SLOT;

		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
			phys |= BT_PHY_BR_1M_5SLOT;

		/* ACL logical transport (2 Mb/s) ptt=1:
		 * 2-DH1, 2-DH3 and 2-DH5.
		 */
		if (!(conn->pkt_type & HCI_2DH1))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & HCI_2DH3))
			phys |= BT_PHY_EDR_2M_3SLOT;

		if (!(conn->pkt_type & HCI_2DH5))
			phys |= BT_PHY_EDR_2M_5SLOT;

		/* ACL logical transport (3 Mb/s) ptt=1:
		 * 3-DH1, 3-DH3 and 3-DH5.
		 */
		if (!(conn->pkt_type & HCI_3DH1))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & HCI_3DH3))
			phys |= BT_PHY_EDR_3M_3SLOT;

		if (!(conn->pkt_type & HCI_3DH5))
			phys |= BT_PHY_EDR_3M_5SLOT;

		break;

	case ESCO_LINK:
		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
			phys |= BT_PHY_BR_1M_3SLOT;

		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
		if (!(conn->pkt_type & ESCO_2EV3))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & ESCO_2EV5))
			phys |= BT_PHY_EDR_2M_3SLOT;

		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
		if (!(conn->pkt_type & ESCO_3EV3))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & ESCO_3EV5))
			phys |= BT_PHY_EDR_3M_3SLOT;

		break;

	case LE_LINK:
		/* LE PHYs are reported separately per direction */
		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_RX;

		break;
	}

	return phys;
}
2779
/* Abort @conn with @reason, issuing whichever HCI command matches the
 * connection's current state (disconnect, create-connection cancel or
 * connection-request reject).
 *
 * Returns 0 or the error from hci_send_cmd().
 */
int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	int r = 0;

	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established link: AMP links use the physical-link
		 * disconnect command, everything else plain Disconnect.
		 */
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
					 sizeof(cp), &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT,
					 sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		/* Outgoing connection still being established: cancel it */
		if (conn->type == LE_LINK) {
			/* Connections still driven by the scanner have no
			 * controller-side request to cancel yet.
			 */
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			r = hci_send_cmd(conn->hdev,
					 HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
		} else if (conn->type == ACL_LINK) {
			/* Create Connection Cancel only exists since 1.2 */
			if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			r = hci_send_cmd(conn->hdev,
					 HCI_OP_CREATE_CONN_CANCEL,
					 6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		/* Incoming connection request not yet accepted: reject it */
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			r = hci_send_cmd(conn->hdev,
					 HCI_OP_REJECT_CONN_REQ,
					 sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			r = hci_send_cmd(conn->hdev,
					 HCI_OP_REJECT_SYNC_CONN_REQ,
					 sizeof(rej), &rej);
		}
		break;
	default:
		/* No command in flight is needed; just mark it closed */
		conn->state = BT_CLOSED;
		break;
	}

	return r;
}
2855