/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/smp.h>

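/* Initiate an outgoing LE connection: the connection is marked as a
 * pending master-role connect and HCI_OP_LE_CREATE_CONN is sent with the
 * fixed scan and connection parameters below.
 */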
static void hci_le_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_conn cp;

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->link_mode |= HCI_LM_MASTER;
	conn->sec_level = BT_SECURITY_LOW;

	memset(&cp, 0, sizeof(cp));
	cp.scan_interval = cpu_to_le16(0x0060);
	cp.scan_window = cpu_to_le16(0x0030);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.conn_interval_min = cpu_to_le16(0x0028);
	cp.conn_interval_max = cpu_to_le16(0x0038);
	cp.supervision_timeout = cpu_to_le16(0x002a);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_le_connect_cancel(struct hci_conn *conn)
{
	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

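/* Initiate an outgoing BR/EDR ACL connection.  Page scan parameters and
 * the remote device class are seeded from the inquiry cache when a
 * sufficiently recent entry exists for the destination address.
 */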
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			cp.clock_offset   = ie->data.clock_offset |
							cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

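/* Set up an eSCO (synchronous) connection on top of the ACL link
 * identified by @handle.  Bandwidth (8000 bytes/s each way), latency and
 * retransmission effort use the fixed values below; the voice setting
 * comes from the controller.
 */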
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.max_latency    = cpu_to_le16(0xffff);
	cp.voice_setting  = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
					u16 latency, u16 to_multiplier)
{
	struct hci_cp_le_conn_update cp;
	struct hci_dev *hdev = conn->hdev;

	memset(&cp, 0, sizeof(cp));

	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0001);
	cp.max_ce_len		= cpu_to_le16(0x0001);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_conn_update);

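/* Start link-layer encryption on an LE connection using the given
 * Long Term Key, EDIV and random value.
 */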
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
							__u8 ltk[16])
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("%p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
	cp.ediv = ediv;
	memcpy(cp.rand, rand, sizeof(cp.rand));

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_start_enc);

void hci_le_ltk_neg_reply(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_ltk_neg_reply cp;

	BT_DBG("%p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);

	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	BT_DBG("%p", conn);

	if (!sco)
		return;

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}

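/* Disconnect timer: delayed work that tears down a connection once its
 * reference count has dropped to zero.  Pending outgoing connects are
 * cancelled, established links are disconnected and anything else is
 * simply moved to BT_CLOSED.
 */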
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
							disc_work.work);
	__u8 reason;

	BT_DBG("conn %p state %s", conn, state_to_string(conn->state));

	if (atomic_read(&conn->refcnt))
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_connect_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_connect_cancel(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		reason = hci_proto_disconn_ind(conn);
		hci_acl_disconn(conn, reason);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

/* Enter sniff mode */
static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

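/* Auto-accept timer: confirm a pending user confirmation request for the
 * remote address without user interaction by sending
 * HCI_OP_USER_CONFIRM_REPLY.
 */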
static void hci_conn_auto_accept(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;

	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
								&conn->dst);
}

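/* Allocate and initialise a new hci_conn: pick the packet types allowed
 * for the link type, set up the idle, auto-accept and disconnect timers
 * and register the connection in the device's connection hash.
 */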
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
							(unsigned long) conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	atomic_set(&conn->devref, 0);

	hci_conn_init_sysfs(conn);

	return conn;
}

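/* Tear down a connection: stop its timers, detach any linked SCO/ACL
 * connection, give the credits for unacknowledged packets back to the
 * device's ACL/LE counters and remove the connection from the hash.
 */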
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	cancel_delayed_work_sync(&conn->disc_work);

	del_timer(&conn->auto_accept_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_put_device(conn);

	hci_dev_put(hdev);

	if (conn->handle == 0)
		kfree(conn);

	return 0;
}

struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO, ACL or LE connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_conn *le;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (type == LE_LINK) {
		struct adv_entry *entry;

		le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
		if (le)
			return ERR_PTR(-EBUSY);

		entry = hci_find_adv_entry(hdev, dst);
		if (!entry)
			return ERR_PTR(-EHOSTUNREACH);

		le = hci_conn_add(hdev, LE_LINK, dst);
		if (!le)
			return ERR_PTR(-ENOMEM);

		le->dst_type = entry->bdaddr_type;

		hci_le_connect(le);

		hci_conn_hold(le);

		return le;
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	}

	if (type == ACL_LINK)
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst);
		if (!sco) {
			hci_conn_put(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		/* encrypt must be pending if auth is also pending */
		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
							sizeof(cp), &cp);
		if (conn->key_type != 0xff)
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
	}

	return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
	}
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For SDP we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non-2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!(conn->link_mode & HCI_LM_AUTH))
		goto auth;

	/* An authenticated combination key has sufficient security for any
	   security level. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security levels 1 and 2. */
	if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
			(sec_level == BT_SECURITY_MEDIUM ||
			sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key always has sufficient security for security
	   levels 1 and 2. High security level requires that the combination
	   key is generated using the maximum PIN code length (16).
	   For pre-2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
			(sec_level != BT_SECURITY_HIGH ||
			conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (conn->link_mode & HCI_LM_ENCRYPT)
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
	BT_DBG("conn %p", conn);

	if (sec_level != BT_SECURITY_HIGH)
		return 1; /* Accept if non-secure is required */

	if (conn->sec_level == BT_SECURITY_HIGH)
		return 1;

	return 0; /* Reject insecure link */
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}

void hci_conn_hold_device(struct hci_conn *conn)
{
	atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->devref))
		hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

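/* ioctl helper: copy up to req.conn_num connection info records for the
 * requested device to userspace.
 */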
int hci_get_conn_list(void __user *arg)
{
	register struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

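/* Allocate a channel attached to this connection with its own data queue
 * and add it to the connection's RCU-protected channel list.
 */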
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s conn %p", hdev->name, conn);

	chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = conn;
	skb_queue_head_init(&chan->data_q);

	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}

int hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	synchronize_rcu();

	skb_queue_purge(&chan->data_q);
	kfree(chan);

	return 0;
}

void hci_chan_list_flush(struct hci_conn *conn)
{
	struct hci_chan *chan, *n;

	BT_DBG("conn %p", conn);

	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
		hci_chan_del(chan);
}