/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI connection handling. */
#include <linux/export.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
42 static const struct sco_param esco_param_cvsd[] = {
43 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */
44 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
45 { EDR_ESCO_MASK | ESCO_EV3, 0x0007, 0x01 }, /* S1 */
46 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0x01 }, /* D1 */
47 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0x01 }, /* D0 */
50 static const struct sco_param sco_param_cvsd[] = {
51 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0xff }, /* D1 */
52 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0xff }, /* D0 */
55 static const struct sco_param esco_param_msbc[] = {
56 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */
57 { EDR_ESCO_MASK | ESCO_EV3, 0x0008, 0x02 }, /* T1 */
60 static void hci_le_create_connection_cancel(struct hci_conn *conn)
62 hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
65 static void hci_acl_create_connection(struct hci_conn *conn)
67 struct hci_dev *hdev = conn->hdev;
68 struct inquiry_entry *ie;
69 struct hci_cp_create_conn cp;
71 BT_DBG("hcon %p", conn);
73 conn->state = BT_CONNECT;
75 conn->role = HCI_ROLE_MASTER;
79 conn->link_policy = hdev->link_policy;
81 memset(&cp, 0, sizeof(cp));
82 bacpy(&cp.bdaddr, &conn->dst);
83 cp.pscan_rep_mode = 0x02;
85 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
87 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
88 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
89 cp.pscan_mode = ie->data.pscan_mode;
90 cp.clock_offset = ie->data.clock_offset |
94 memcpy(conn->dev_class, ie->data.dev_class, 3);
95 if (ie->data.ssp_mode > 0)
96 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
99 cp.pkt_type = cpu_to_le16(conn->pkt_type);
100 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
101 cp.role_switch = 0x01;
103 cp.role_switch = 0x00;
105 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
108 static void hci_acl_create_connection_cancel(struct hci_conn *conn)
110 struct hci_cp_create_conn_cancel cp;
112 BT_DBG("hcon %p", conn);
114 if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
117 bacpy(&cp.bdaddr, &conn->dst);
118 hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
121 static void hci_reject_sco(struct hci_conn *conn)
123 struct hci_cp_reject_sync_conn_req cp;
125 cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
126 bacpy(&cp.bdaddr, &conn->dst);
128 hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
131 int hci_disconnect(struct hci_conn *conn, __u8 reason)
133 struct hci_cp_disconnect cp;
135 BT_DBG("hcon %p", conn);
137 /* When we are master of an established connection and it enters
138 * the disconnect timeout, then go ahead and try to read the
139 * current clock offset. Processing of the result is done
140 * within the event handling and hci_clock_offset_evt function.
142 if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) {
143 struct hci_dev *hdev = conn->hdev;
144 struct hci_cp_read_clock_offset clkoff_cp;
146 clkoff_cp.handle = cpu_to_le16(conn->handle);
147 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
151 conn->state = BT_DISCONN;
153 cp.handle = cpu_to_le16(conn->handle);
155 return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
158 static void hci_amp_disconn(struct hci_conn *conn)
160 struct hci_cp_disconn_phy_link cp;
162 BT_DBG("hcon %p", conn);
164 conn->state = BT_DISCONN;
166 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
167 cp.reason = hci_proto_disconn_ind(conn);
168 hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
172 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
174 struct hci_dev *hdev = conn->hdev;
175 struct hci_cp_add_sco cp;
177 BT_DBG("hcon %p", conn);
179 conn->state = BT_CONNECT;
184 cp.handle = cpu_to_le16(handle);
185 cp.pkt_type = cpu_to_le16(conn->pkt_type);
187 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
190 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
192 struct hci_dev *hdev = conn->hdev;
193 struct hci_cp_setup_sync_conn cp;
194 const struct sco_param *param;
196 BT_DBG("hcon %p", conn);
198 conn->state = BT_CONNECT;
203 cp.handle = cpu_to_le16(handle);
205 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
206 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
207 cp.voice_setting = cpu_to_le16(conn->setting);
209 switch (conn->setting & SCO_AIRMODE_MASK) {
210 case SCO_AIRMODE_TRANSP:
211 if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
213 param = &esco_param_msbc[conn->attempt - 1];
215 case SCO_AIRMODE_CVSD:
216 if (lmp_esco_capable(conn->link)) {
217 if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
219 param = &esco_param_cvsd[conn->attempt - 1];
221 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
223 param = &sco_param_cvsd[conn->attempt - 1];
230 cp.retrans_effort = param->retrans_effort;
231 cp.pkt_type = __cpu_to_le16(param->pkt_type);
232 cp.max_latency = __cpu_to_le16(param->max_latency);
234 if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
240 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
243 struct hci_dev *hdev = conn->hdev;
244 struct hci_conn_params *params;
245 struct hci_cp_le_conn_update cp;
249 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
251 params->conn_min_interval = min;
252 params->conn_max_interval = max;
253 params->conn_latency = latency;
254 params->supervision_timeout = to_multiplier;
257 hci_dev_unlock(hdev);
259 memset(&cp, 0, sizeof(cp));
260 cp.handle = cpu_to_le16(conn->handle);
261 cp.conn_interval_min = cpu_to_le16(min);
262 cp.conn_interval_max = cpu_to_le16(max);
263 cp.conn_latency = cpu_to_le16(latency);
264 cp.supervision_timeout = cpu_to_le16(to_multiplier);
265 cp.min_ce_len = cpu_to_le16(0x0000);
266 cp.max_ce_len = cpu_to_le16(0x0000);
268 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
276 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
279 struct hci_dev *hdev = conn->hdev;
280 struct hci_cp_le_start_enc cp;
282 BT_DBG("hcon %p", conn);
284 memset(&cp, 0, sizeof(cp));
286 cp.handle = cpu_to_le16(conn->handle);
289 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
291 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
294 /* Device _must_ be locked */
295 void hci_sco_setup(struct hci_conn *conn, __u8 status)
297 struct hci_conn *sco = conn->link;
302 BT_DBG("hcon %p", conn);
305 if (lmp_esco_capable(conn->hdev))
306 hci_setup_sync(sco, conn->handle);
308 hci_add_sco(sco, conn->handle);
310 hci_proto_connect_cfm(sco, status);
315 static void hci_conn_timeout(struct work_struct *work)
317 struct hci_conn *conn = container_of(work, struct hci_conn,
319 int refcnt = atomic_read(&conn->refcnt);
321 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
325 /* FIXME: It was observed that in pairing failed scenario, refcnt
326 * drops below 0. Probably this is because l2cap_conn_del calls
327 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
328 * dropped. After that loop hci_chan_del is called which also drops
329 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
335 switch (conn->state) {
339 if (conn->type == ACL_LINK)
340 hci_acl_create_connection_cancel(conn);
341 else if (conn->type == LE_LINK)
342 hci_le_create_connection_cancel(conn);
343 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
344 hci_reject_sco(conn);
349 if (conn->type == AMP_LINK) {
350 hci_amp_disconn(conn);
352 __u8 reason = hci_proto_disconn_ind(conn);
353 hci_disconnect(conn, reason);
357 conn->state = BT_CLOSED;
362 /* Enter sniff mode */
363 static void hci_conn_idle(struct work_struct *work)
365 struct hci_conn *conn = container_of(work, struct hci_conn,
367 struct hci_dev *hdev = conn->hdev;
369 BT_DBG("hcon %p mode %d", conn, conn->mode);
371 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
374 if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
377 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
378 struct hci_cp_sniff_subrate cp;
379 cp.handle = cpu_to_le16(conn->handle);
380 cp.max_latency = cpu_to_le16(0);
381 cp.min_remote_timeout = cpu_to_le16(0);
382 cp.min_local_timeout = cpu_to_le16(0);
383 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
386 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
387 struct hci_cp_sniff_mode cp;
388 cp.handle = cpu_to_le16(conn->handle);
389 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
390 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
391 cp.attempt = cpu_to_le16(4);
392 cp.timeout = cpu_to_le16(1);
393 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
397 static void hci_conn_auto_accept(struct work_struct *work)
399 struct hci_conn *conn = container_of(work, struct hci_conn,
400 auto_accept_work.work);
402 hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
406 static void le_conn_timeout(struct work_struct *work)
408 struct hci_conn *conn = container_of(work, struct hci_conn,
409 le_conn_timeout.work);
410 struct hci_dev *hdev = conn->hdev;
414 /* We could end up here due to having done directed advertising,
415 * so clean up the state if necessary. This should however only
416 * happen with broken hardware or if low duty cycle was used
417 * (which doesn't have a timeout of its own).
419 if (conn->role == HCI_ROLE_SLAVE) {
421 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
423 hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
427 hci_le_create_connection_cancel(conn);
430 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
433 struct hci_conn *conn;
435 BT_DBG("%s dst %pMR", hdev->name, dst);
437 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
441 bacpy(&conn->dst, dst);
442 bacpy(&conn->src, &hdev->bdaddr);
446 conn->mode = HCI_CM_ACTIVE;
447 conn->state = BT_OPEN;
448 conn->auth_type = HCI_AT_GENERAL_BONDING;
449 conn->io_capability = hdev->io_capability;
450 conn->remote_auth = 0xff;
451 conn->key_type = 0xff;
452 conn->tx_power = HCI_TX_POWER_INVALID;
453 conn->max_tx_power = HCI_TX_POWER_INVALID;
455 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
456 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
458 if (conn->role == HCI_ROLE_MASTER)
463 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
466 /* conn->src should reflect the local identity address */
467 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
470 if (lmp_esco_capable(hdev))
471 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
472 (hdev->esco_type & EDR_ESCO_MASK);
474 conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
477 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
481 skb_queue_head_init(&conn->data_q);
483 INIT_LIST_HEAD(&conn->chan_list);
485 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
486 INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
487 INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
488 INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
490 atomic_set(&conn->refcnt, 0);
494 hci_conn_hash_add(hdev, conn);
496 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
498 hci_conn_init_sysfs(conn);
503 int hci_conn_del(struct hci_conn *conn)
505 struct hci_dev *hdev = conn->hdev;
507 BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
509 cancel_delayed_work_sync(&conn->disc_work);
510 cancel_delayed_work_sync(&conn->auto_accept_work);
511 cancel_delayed_work_sync(&conn->idle_work);
513 if (conn->type == ACL_LINK) {
514 struct hci_conn *sco = conn->link;
519 hdev->acl_cnt += conn->sent;
520 } else if (conn->type == LE_LINK) {
521 cancel_delayed_work(&conn->le_conn_timeout);
524 hdev->le_cnt += conn->sent;
526 hdev->acl_cnt += conn->sent;
528 struct hci_conn *acl = conn->link;
535 hci_chan_list_flush(conn);
538 amp_mgr_put(conn->amp_mgr);
540 hci_conn_hash_del(hdev, conn);
542 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
544 skb_queue_purge(&conn->data_q);
546 hci_conn_del_sysfs(conn);
548 if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
549 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
558 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
560 int use_src = bacmp(src, BDADDR_ANY);
561 struct hci_dev *hdev = NULL, *d;
563 BT_DBG("%pMR -> %pMR", src, dst);
565 read_lock(&hci_dev_list_lock);
567 list_for_each_entry(d, &hci_dev_list, list) {
568 if (!test_bit(HCI_UP, &d->flags) ||
569 test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
570 d->dev_type != HCI_BREDR)
574 * No source address - find interface with bdaddr != dst
575 * Source address - find interface with bdaddr == src
579 if (!bacmp(&d->bdaddr, src)) {
583 if (bacmp(&d->bdaddr, dst)) {
590 hdev = hci_dev_hold(hdev);
592 read_unlock(&hci_dev_list_lock);
595 EXPORT_SYMBOL(hci_get_route);
597 /* This function requires the caller holds hdev->lock */
598 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
600 struct hci_dev *hdev = conn->hdev;
601 struct hci_conn_params *params;
603 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
605 if (params && params->conn) {
606 hci_conn_drop(params->conn);
607 hci_conn_put(params->conn);
611 conn->state = BT_CLOSED;
613 mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
616 hci_proto_connect_cfm(conn, status);
620 /* Since we may have temporarily stopped the background scanning in
621 * favor of connection establishment, we should restart it.
623 hci_update_background_scan(hdev);
625 /* Re-enable advertising in case this was a failed connection
626 * attempt as a peripheral.
628 mgmt_reenable_advertising(hdev);
631 static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
633 struct hci_conn *conn;
638 BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
643 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
647 hci_le_conn_failed(conn, status);
650 hci_dev_unlock(hdev);
653 static void hci_req_add_le_create_conn(struct hci_request *req,
654 struct hci_conn *conn)
656 struct hci_cp_le_create_conn cp;
657 struct hci_dev *hdev = conn->hdev;
660 memset(&cp, 0, sizeof(cp));
662 /* Update random address, but set require_privacy to false so
663 * that we never connect with an unresolvable address.
665 if (hci_update_random_address(req, false, &own_addr_type))
668 cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
669 cp.scan_window = cpu_to_le16(hdev->le_scan_window);
670 bacpy(&cp.peer_addr, &conn->dst);
671 cp.peer_addr_type = conn->dst_type;
672 cp.own_address_type = own_addr_type;
673 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
674 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
675 cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
676 cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
677 cp.min_ce_len = cpu_to_le16(0x0000);
678 cp.max_ce_len = cpu_to_le16(0x0000);
680 hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
682 conn->state = BT_CONNECT;
685 static void hci_req_directed_advertising(struct hci_request *req,
686 struct hci_conn *conn)
688 struct hci_dev *hdev = req->hdev;
689 struct hci_cp_le_set_adv_param cp;
693 /* Clear the HCI_LE_ADV bit temporarily so that the
694 * hci_update_random_address knows that it's safe to go ahead
695 * and write a new random address. The flag will be set back on
696 * as soon as the SET_ADV_ENABLE HCI command completes.
698 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
700 /* Set require_privacy to false so that the remote device has a
701 * chance of identifying us.
703 if (hci_update_random_address(req, false, &own_addr_type) < 0)
706 memset(&cp, 0, sizeof(cp));
707 cp.type = LE_ADV_DIRECT_IND;
708 cp.own_address_type = own_addr_type;
709 cp.direct_addr_type = conn->dst_type;
710 bacpy(&cp.direct_addr, &conn->dst);
711 cp.channel_map = hdev->le_adv_channel_map;
713 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
716 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
718 conn->state = BT_CONNECT;
721 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
722 u8 dst_type, u8 sec_level, u16 conn_timeout,
725 struct hci_conn_params *params;
726 struct hci_conn *conn;
728 struct hci_request req;
731 /* Some devices send ATT messages as soon as the physical link is
732 * established. To be able to handle these ATT messages, the user-
733 * space first establishes the connection and then starts the pairing
736 * So if a hci_conn object already exists for the following connection
737 * attempt, we simply update pending_sec_level and auth_type fields
738 * and return the object found.
740 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
742 conn->pending_sec_level = sec_level;
746 /* Since the controller supports only one LE connection attempt at a
747 * time, we return -EBUSY if there is any connection attempt running.
749 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
751 return ERR_PTR(-EBUSY);
753 /* When given an identity address with existing identity
754 * resolving key, the connection needs to be established
755 * to a resolvable random address.
757 * This uses the cached random resolvable address from
758 * a previous scan. When no cached address is available,
759 * try connecting to the identity address instead.
761 * Storing the resolvable random address is required here
762 * to handle connection failures. The address will later
763 * be resolved back into the original identity address
764 * from the connect request.
766 irk = hci_find_irk_by_addr(hdev, dst, dst_type);
767 if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
769 dst_type = ADDR_LE_DEV_RANDOM;
772 conn = hci_conn_add(hdev, LE_LINK, dst, role);
774 return ERR_PTR(-ENOMEM);
776 conn->dst_type = dst_type;
777 conn->sec_level = BT_SECURITY_LOW;
778 conn->pending_sec_level = sec_level;
779 conn->conn_timeout = conn_timeout;
781 hci_req_init(&req, hdev);
783 /* Disable advertising if we're active. For master role
784 * connections most controllers will refuse to connect if
785 * advertising is enabled, and for slave role connections we
786 * anyway have to disable it in order to start directed
789 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
791 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
795 /* If requested to connect as slave use directed advertising */
796 if (conn->role == HCI_ROLE_SLAVE) {
797 /* If we're active scanning most controllers are unable
798 * to initiate advertising. Simply reject the attempt.
800 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
801 hdev->le_scan_type == LE_SCAN_ACTIVE) {
802 skb_queue_purge(&req.cmd_q);
804 return ERR_PTR(-EBUSY);
807 hci_req_directed_advertising(&req, conn);
811 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
813 conn->le_conn_min_interval = params->conn_min_interval;
814 conn->le_conn_max_interval = params->conn_max_interval;
815 conn->le_conn_latency = params->conn_latency;
816 conn->le_supv_timeout = params->supervision_timeout;
818 conn->le_conn_min_interval = hdev->le_conn_min_interval;
819 conn->le_conn_max_interval = hdev->le_conn_max_interval;
820 conn->le_conn_latency = hdev->le_conn_latency;
821 conn->le_supv_timeout = hdev->le_supv_timeout;
824 /* If controller is scanning, we stop it since some controllers are
825 * not able to scan and connect at the same time. Also set the
826 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
827 * handler for scan disabling knows to set the correct discovery
830 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
831 hci_req_add_le_scan_disable(&req);
832 set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
835 hci_req_add_le_create_conn(&req, conn);
838 err = hci_req_run(&req, create_le_conn_complete);
849 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
850 u8 sec_level, u8 auth_type)
852 struct hci_conn *acl;
854 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
855 return ERR_PTR(-EOPNOTSUPP);
857 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
859 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
861 return ERR_PTR(-ENOMEM);
866 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
867 acl->sec_level = BT_SECURITY_LOW;
868 acl->pending_sec_level = sec_level;
869 acl->auth_type = auth_type;
870 hci_acl_create_connection(acl);
876 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
879 struct hci_conn *acl;
880 struct hci_conn *sco;
882 acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
886 sco = hci_conn_hash_lookup_ba(hdev, type, dst);
888 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
891 return ERR_PTR(-ENOMEM);
900 sco->setting = setting;
902 if (acl->state == BT_CONNECTED &&
903 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
904 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
905 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
907 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
908 /* defer SCO setup until mode change completed */
909 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
913 hci_sco_setup(acl, 0x00);
919 /* Check link security requirement */
920 int hci_conn_check_link_mode(struct hci_conn *conn)
922 BT_DBG("hcon %p", conn);
924 /* In Secure Connections Only mode, it is required that Secure
925 * Connections is used and the link is encrypted with AES-CCM
926 * using a P-256 authenticated combination key.
928 if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
929 if (!hci_conn_sc_enabled(conn) ||
930 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
931 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
935 if (hci_conn_ssp_enabled(conn) &&
936 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
942 /* Authenticate remote device */
943 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
945 BT_DBG("hcon %p", conn);
947 if (conn->pending_sec_level > sec_level)
948 sec_level = conn->pending_sec_level;
950 if (sec_level > conn->sec_level)
951 conn->pending_sec_level = sec_level;
952 else if (test_bit(HCI_CONN_AUTH, &conn->flags))
955 /* Make sure we preserve an existing MITM requirement*/
956 auth_type |= (conn->auth_type & 0x01);
958 conn->auth_type = auth_type;
960 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
961 struct hci_cp_auth_requested cp;
963 cp.handle = cpu_to_le16(conn->handle);
964 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
967 /* If we're already encrypted set the REAUTH_PEND flag,
968 * otherwise set the ENCRYPT_PEND.
970 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
971 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
973 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
979 /* Encrypt the the link */
980 static void hci_conn_encrypt(struct hci_conn *conn)
982 BT_DBG("hcon %p", conn);
984 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
985 struct hci_cp_set_conn_encrypt cp;
986 cp.handle = cpu_to_le16(conn->handle);
988 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
993 /* Enable security */
994 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
997 BT_DBG("hcon %p", conn);
999 if (conn->type == LE_LINK)
1000 return smp_conn_security(conn, sec_level);
1002 /* For sdp we don't need the link key. */
1003 if (sec_level == BT_SECURITY_SDP)
1006 /* For non 2.1 devices and low security level we don't need the link
1008 if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
1011 /* For other security levels we need the link key. */
1012 if (!test_bit(HCI_CONN_AUTH, &conn->flags))
1015 /* An authenticated FIPS approved combination key has sufficient
1016 * security for security level 4. */
1017 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
1018 sec_level == BT_SECURITY_FIPS)
1021 /* An authenticated combination key has sufficient security for
1022 security level 3. */
1023 if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
1024 conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
1025 sec_level == BT_SECURITY_HIGH)
1028 /* An unauthenticated combination key has sufficient security for
1029 security level 1 and 2. */
1030 if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
1031 conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
1032 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
1035 /* A combination key has always sufficient security for the security
1036 levels 1 or 2. High security level requires the combination key
1037 is generated using maximum PIN code length (16).
1038 For pre 2.1 units. */
1039 if (conn->key_type == HCI_LK_COMBINATION &&
1040 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
1041 conn->pin_length == 16))
1045 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
1049 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1051 if (!hci_conn_auth(conn, sec_level, auth_type))
1055 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1058 hci_conn_encrypt(conn);
1061 EXPORT_SYMBOL(hci_conn_security);
1063 /* Check secure link requirement */
1064 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1066 BT_DBG("hcon %p", conn);
1068 /* Accept if non-secure or higher security level is required */
1069 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1072 /* Accept if secure or higher security level is already present */
1073 if (conn->sec_level == BT_SECURITY_HIGH ||
1074 conn->sec_level == BT_SECURITY_FIPS)
1077 /* Reject not secure link */
1080 EXPORT_SYMBOL(hci_conn_check_secure);
1082 /* Change link key */
1083 int hci_conn_change_link_key(struct hci_conn *conn)
1085 BT_DBG("hcon %p", conn);
1087 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1088 struct hci_cp_change_conn_link_key cp;
1089 cp.handle = cpu_to_le16(conn->handle);
1090 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
1098 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1100 BT_DBG("hcon %p", conn);
1102 if (role == conn->role)
1105 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1106 struct hci_cp_switch_role cp;
1107 bacpy(&cp.bdaddr, &conn->dst);
1109 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1114 EXPORT_SYMBOL(hci_conn_switch_role);
1116 /* Enter active mode */
1117 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1119 struct hci_dev *hdev = conn->hdev;
1121 BT_DBG("hcon %p mode %d", conn, conn->mode);
1123 if (conn->mode != HCI_CM_SNIFF)
1126 if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1129 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1130 struct hci_cp_exit_sniff_mode cp;
1131 cp.handle = cpu_to_le16(conn->handle);
1132 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1136 if (hdev->idle_timeout > 0)
1137 queue_delayed_work(hdev->workqueue, &conn->idle_work,
1138 msecs_to_jiffies(hdev->idle_timeout));
1141 /* Drop all connection on the device */
1142 void hci_conn_hash_flush(struct hci_dev *hdev)
1144 struct hci_conn_hash *h = &hdev->conn_hash;
1145 struct hci_conn *c, *n;
1147 BT_DBG("hdev %s", hdev->name);
1149 list_for_each_entry_safe(c, n, &h->list, list) {
1150 c->state = BT_CLOSED;
1152 hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1157 /* Check pending connect attempts */
1158 void hci_conn_check_pending(struct hci_dev *hdev)
1160 struct hci_conn *conn;
1162 BT_DBG("hdev %s", hdev->name);
1166 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1168 hci_acl_create_connection(conn);
1170 hci_dev_unlock(hdev);
1173 static u32 get_link_mode(struct hci_conn *conn)
1177 if (conn->role == HCI_ROLE_MASTER)
1178 link_mode |= HCI_LM_MASTER;
1180 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1181 link_mode |= HCI_LM_ENCRYPT;
1183 if (test_bit(HCI_CONN_AUTH, &conn->flags))
1184 link_mode |= HCI_LM_AUTH;
1186 if (test_bit(HCI_CONN_SECURE, &conn->flags))
1187 link_mode |= HCI_LM_SECURE;
1189 if (test_bit(HCI_CONN_FIPS, &conn->flags))
1190 link_mode |= HCI_LM_FIPS;
1195 int hci_get_conn_list(void __user *arg)
1198 struct hci_conn_list_req req, *cl;
1199 struct hci_conn_info *ci;
1200 struct hci_dev *hdev;
1201 int n = 0, size, err;
1203 if (copy_from_user(&req, arg, sizeof(req)))
1206 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1209 size = sizeof(req) + req.conn_num * sizeof(*ci);
1211 cl = kmalloc(size, GFP_KERNEL);
1215 hdev = hci_dev_get(req.dev_id);
1224 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1225 bacpy(&(ci + n)->bdaddr, &c->dst);
1226 (ci + n)->handle = c->handle;
1227 (ci + n)->type = c->type;
1228 (ci + n)->out = c->out;
1229 (ci + n)->state = c->state;
1230 (ci + n)->link_mode = get_link_mode(c);
1231 if (++n >= req.conn_num)
1234 hci_dev_unlock(hdev);
1236 cl->dev_id = hdev->id;
1238 size = sizeof(req) + n * sizeof(*ci);
1242 err = copy_to_user(arg, cl, size);
1245 return err ? -EFAULT : 0;
1248 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1250 struct hci_conn_info_req req;
1251 struct hci_conn_info ci;
1252 struct hci_conn *conn;
1253 char __user *ptr = arg + sizeof(req);
1255 if (copy_from_user(&req, arg, sizeof(req)))
1259 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1261 bacpy(&ci.bdaddr, &conn->dst);
1262 ci.handle = conn->handle;
1263 ci.type = conn->type;
1265 ci.state = conn->state;
1266 ci.link_mode = get_link_mode(conn);
1268 hci_dev_unlock(hdev);
1273 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1276 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1278 struct hci_auth_info_req req;
1279 struct hci_conn *conn;
1281 if (copy_from_user(&req, arg, sizeof(req)))
1285 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1287 req.type = conn->auth_type;
1288 hci_dev_unlock(hdev);
1293 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1296 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1298 struct hci_dev *hdev = conn->hdev;
1299 struct hci_chan *chan;
1301 BT_DBG("%s hcon %p", hdev->name, conn);
1303 if (test_bit(HCI_CONN_DROP, &conn->flags)) {
1304 BT_DBG("Refusing to create new hci_chan");
1308 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1312 chan->conn = hci_conn_get(conn);
1313 skb_queue_head_init(&chan->data_q);
1314 chan->state = BT_CONNECTED;
1316 list_add_rcu(&chan->list, &conn->chan_list);
1321 void hci_chan_del(struct hci_chan *chan)
1323 struct hci_conn *conn = chan->conn;
1324 struct hci_dev *hdev = conn->hdev;
1326 BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1328 list_del_rcu(&chan->list);
1332 /* Prevent new hci_chan's to be created for this hci_conn */
1333 set_bit(HCI_CONN_DROP, &conn->flags);
1337 skb_queue_purge(&chan->data_q);
1341 void hci_chan_list_flush(struct hci_conn *conn)
1343 struct hci_chan *chan, *n;
1345 BT_DBG("hcon %p", conn);
1347 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1351 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1354 struct hci_chan *hchan;
1356 list_for_each_entry(hchan, &hcon->chan_list, list) {
1357 if (hchan->handle == handle)
1364 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1366 struct hci_conn_hash *h = &hdev->conn_hash;
1367 struct hci_conn *hcon;
1368 struct hci_chan *hchan = NULL;
1372 list_for_each_entry_rcu(hcon, &h->list, list) {
1373 hchan = __hci_chan_lookup_handle(hcon, handle);