/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
/* 128-bit all-zero link key, used to detect debug/invalid keys. */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* Handle HCI Event packets */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
46 __u8 status = *((__u8 *) skb->data);
48 BT_DBG("%s status 0x%2.2x", hdev->name, status);
53 clear_bit(HCI_INQUIRY, &hdev->flags);
54 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 wake_up_bit(&hdev->flags, HCI_INQUIRY);
58 /* Set discovery state to stopped if we're not doing LE active
61 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
62 hdev->le_scan_type != LE_SCAN_ACTIVE)
63 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
66 hci_conn_check_pending(hdev);
69 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
71 __u8 status = *((__u8 *) skb->data);
73 BT_DBG("%s status 0x%2.2x", hdev->name, status);
78 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
81 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
83 __u8 status = *((__u8 *) skb->data);
85 BT_DBG("%s status 0x%2.2x", hdev->name, status);
90 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
92 hci_conn_check_pending(hdev);
95 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
98 BT_DBG("%s", hdev->name);
101 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
103 struct hci_rp_role_discovery *rp = (void *) skb->data;
104 struct hci_conn *conn;
106 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
113 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
115 conn->role = rp->role;
117 hci_dev_unlock(hdev);
120 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
122 struct hci_rp_read_link_policy *rp = (void *) skb->data;
123 struct hci_conn *conn;
125 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
132 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
134 conn->link_policy = __le16_to_cpu(rp->policy);
136 hci_dev_unlock(hdev);
139 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
141 struct hci_rp_write_link_policy *rp = (void *) skb->data;
142 struct hci_conn *conn;
145 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
150 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
156 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
158 conn->link_policy = get_unaligned_le16(sent + 2);
160 hci_dev_unlock(hdev);
163 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
166 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
168 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
173 hdev->link_policy = __le16_to_cpu(rp->policy);
176 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
179 __u8 status = *((__u8 *) skb->data);
182 BT_DBG("%s status 0x%2.2x", hdev->name, status);
187 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
191 hdev->link_policy = get_unaligned_le16(sent);
194 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
196 __u8 status = *((__u8 *) skb->data);
198 BT_DBG("%s status 0x%2.2x", hdev->name, status);
200 clear_bit(HCI_RESET, &hdev->flags);
205 /* Reset all non-persistent flags */
206 hci_dev_clear_volatile_flags(hdev);
208 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
210 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
211 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
213 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
214 hdev->adv_data_len = 0;
216 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
217 hdev->scan_rsp_data_len = 0;
219 hdev->le_scan_type = LE_SCAN_PASSIVE;
221 hdev->ssp_debug_mode = 0;
223 hci_bdaddr_list_clear(&hdev->le_white_list);
224 hci_bdaddr_list_clear(&hdev->le_resolv_list);
227 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
230 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
231 struct hci_cp_read_stored_link_key *sent;
233 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
235 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
239 if (!rp->status && sent->read_all == 0x01) {
240 hdev->stored_max_keys = rp->max_keys;
241 hdev->stored_num_keys = rp->num_keys;
245 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
248 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
250 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
255 if (rp->num_keys <= hdev->stored_num_keys)
256 hdev->stored_num_keys -= rp->num_keys;
258 hdev->stored_num_keys = 0;
261 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
263 __u8 status = *((__u8 *) skb->data);
266 BT_DBG("%s status 0x%2.2x", hdev->name, status);
268 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
274 if (hci_dev_test_flag(hdev, HCI_MGMT))
275 mgmt_set_local_name_complete(hdev, sent, status);
277 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
279 hci_dev_unlock(hdev);
282 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
284 struct hci_rp_read_local_name *rp = (void *) skb->data;
286 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
291 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
292 hci_dev_test_flag(hdev, HCI_CONFIG))
293 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
296 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
298 __u8 status = *((__u8 *) skb->data);
301 BT_DBG("%s status 0x%2.2x", hdev->name, status);
303 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
310 __u8 param = *((__u8 *) sent);
312 if (param == AUTH_ENABLED)
313 set_bit(HCI_AUTH, &hdev->flags);
315 clear_bit(HCI_AUTH, &hdev->flags);
318 if (hci_dev_test_flag(hdev, HCI_MGMT))
319 mgmt_auth_enable_complete(hdev, status);
321 hci_dev_unlock(hdev);
324 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
326 __u8 status = *((__u8 *) skb->data);
330 BT_DBG("%s status 0x%2.2x", hdev->name, status);
335 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
339 param = *((__u8 *) sent);
342 set_bit(HCI_ENCRYPT, &hdev->flags);
344 clear_bit(HCI_ENCRYPT, &hdev->flags);
347 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
349 __u8 status = *((__u8 *) skb->data);
353 BT_DBG("%s status 0x%2.2x", hdev->name, status);
355 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
359 param = *((__u8 *) sent);
364 hdev->discov_timeout = 0;
368 if (param & SCAN_INQUIRY)
369 set_bit(HCI_ISCAN, &hdev->flags);
371 clear_bit(HCI_ISCAN, &hdev->flags);
373 if (param & SCAN_PAGE)
374 set_bit(HCI_PSCAN, &hdev->flags);
376 clear_bit(HCI_PSCAN, &hdev->flags);
379 hci_dev_unlock(hdev);
382 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
384 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
386 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
391 memcpy(hdev->dev_class, rp->dev_class, 3);
393 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
394 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
397 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
399 __u8 status = *((__u8 *) skb->data);
402 BT_DBG("%s status 0x%2.2x", hdev->name, status);
404 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
411 memcpy(hdev->dev_class, sent, 3);
413 if (hci_dev_test_flag(hdev, HCI_MGMT))
414 mgmt_set_class_of_dev_complete(hdev, sent, status);
416 hci_dev_unlock(hdev);
419 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
421 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
424 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
429 setting = __le16_to_cpu(rp->voice_setting);
431 if (hdev->voice_setting == setting)
434 hdev->voice_setting = setting;
436 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
439 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
442 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
445 __u8 status = *((__u8 *) skb->data);
449 BT_DBG("%s status 0x%2.2x", hdev->name, status);
454 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
458 setting = get_unaligned_le16(sent);
460 if (hdev->voice_setting == setting)
463 hdev->voice_setting = setting;
465 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
468 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
471 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
474 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
476 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
481 hdev->num_iac = rp->num_iac;
483 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
486 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
488 __u8 status = *((__u8 *) skb->data);
489 struct hci_cp_write_ssp_mode *sent;
491 BT_DBG("%s status 0x%2.2x", hdev->name, status);
493 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
501 hdev->features[1][0] |= LMP_HOST_SSP;
503 hdev->features[1][0] &= ~LMP_HOST_SSP;
506 if (hci_dev_test_flag(hdev, HCI_MGMT))
507 mgmt_ssp_enable_complete(hdev, sent->mode, status);
510 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
512 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
515 hci_dev_unlock(hdev);
518 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
520 u8 status = *((u8 *) skb->data);
521 struct hci_cp_write_sc_support *sent;
523 BT_DBG("%s status 0x%2.2x", hdev->name, status);
525 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
533 hdev->features[1][0] |= LMP_HOST_SC;
535 hdev->features[1][0] &= ~LMP_HOST_SC;
538 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
540 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
542 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
545 hci_dev_unlock(hdev);
548 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
550 struct hci_rp_read_local_version *rp = (void *) skb->data;
552 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
557 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
558 hci_dev_test_flag(hdev, HCI_CONFIG)) {
559 hdev->hci_ver = rp->hci_ver;
560 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
561 hdev->lmp_ver = rp->lmp_ver;
562 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
563 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
567 static void hci_cc_read_local_commands(struct hci_dev *hdev,
570 struct hci_rp_read_local_commands *rp = (void *) skb->data;
572 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
577 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
578 hci_dev_test_flag(hdev, HCI_CONFIG))
579 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
582 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
585 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
586 struct hci_conn *conn;
588 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
595 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
597 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
599 hci_dev_unlock(hdev);
602 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
605 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
606 struct hci_conn *conn;
609 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
614 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
620 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
622 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
624 hci_dev_unlock(hdev);
627 static void hci_cc_read_local_features(struct hci_dev *hdev,
630 struct hci_rp_read_local_features *rp = (void *) skb->data;
632 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
637 memcpy(hdev->features, rp->features, 8);
639 /* Adjust default settings according to features
640 * supported by device. */
642 if (hdev->features[0][0] & LMP_3SLOT)
643 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
645 if (hdev->features[0][0] & LMP_5SLOT)
646 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
648 if (hdev->features[0][1] & LMP_HV2) {
649 hdev->pkt_type |= (HCI_HV2);
650 hdev->esco_type |= (ESCO_HV2);
653 if (hdev->features[0][1] & LMP_HV3) {
654 hdev->pkt_type |= (HCI_HV3);
655 hdev->esco_type |= (ESCO_HV3);
658 if (lmp_esco_capable(hdev))
659 hdev->esco_type |= (ESCO_EV3);
661 if (hdev->features[0][4] & LMP_EV4)
662 hdev->esco_type |= (ESCO_EV4);
664 if (hdev->features[0][4] & LMP_EV5)
665 hdev->esco_type |= (ESCO_EV5);
667 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
668 hdev->esco_type |= (ESCO_2EV3);
670 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
671 hdev->esco_type |= (ESCO_3EV3);
673 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
674 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
677 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
680 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
682 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
687 if (hdev->max_page < rp->max_page)
688 hdev->max_page = rp->max_page;
690 if (rp->page < HCI_MAX_PAGES)
691 memcpy(hdev->features[rp->page], rp->features, 8);
694 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
697 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
699 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
704 hdev->flow_ctl_mode = rp->mode;
707 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
709 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
711 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
716 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
717 hdev->sco_mtu = rp->sco_mtu;
718 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
719 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
721 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
726 hdev->acl_cnt = hdev->acl_pkts;
727 hdev->sco_cnt = hdev->sco_pkts;
729 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
730 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
733 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
735 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
737 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
742 if (test_bit(HCI_INIT, &hdev->flags))
743 bacpy(&hdev->bdaddr, &rp->bdaddr);
745 if (hci_dev_test_flag(hdev, HCI_SETUP))
746 bacpy(&hdev->setup_addr, &rp->bdaddr);
749 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
752 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
754 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
759 if (test_bit(HCI_INIT, &hdev->flags)) {
760 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
761 hdev->page_scan_window = __le16_to_cpu(rp->window);
765 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
768 u8 status = *((u8 *) skb->data);
769 struct hci_cp_write_page_scan_activity *sent;
771 BT_DBG("%s status 0x%2.2x", hdev->name, status);
776 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
780 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
781 hdev->page_scan_window = __le16_to_cpu(sent->window);
784 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
787 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
789 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
794 if (test_bit(HCI_INIT, &hdev->flags))
795 hdev->page_scan_type = rp->type;
798 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
801 u8 status = *((u8 *) skb->data);
804 BT_DBG("%s status 0x%2.2x", hdev->name, status);
809 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
811 hdev->page_scan_type = *type;
814 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
817 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
819 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
824 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
825 hdev->block_len = __le16_to_cpu(rp->block_len);
826 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
828 hdev->block_cnt = hdev->num_blocks;
830 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
831 hdev->block_cnt, hdev->block_len);
834 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
836 struct hci_rp_read_clock *rp = (void *) skb->data;
837 struct hci_cp_read_clock *cp;
838 struct hci_conn *conn;
840 BT_DBG("%s", hdev->name);
842 if (skb->len < sizeof(*rp))
850 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
854 if (cp->which == 0x00) {
855 hdev->clock = le32_to_cpu(rp->clock);
859 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
861 conn->clock = le32_to_cpu(rp->clock);
862 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
866 hci_dev_unlock(hdev);
869 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
872 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
874 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
879 hdev->amp_status = rp->amp_status;
880 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
881 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
882 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
883 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
884 hdev->amp_type = rp->amp_type;
885 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
886 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
887 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
888 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
891 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
894 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
896 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
901 hdev->inq_tx_power = rp->tx_power;
904 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
907 struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
909 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
914 hdev->err_data_reporting = rp->err_data_reporting;
917 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
920 __u8 status = *((__u8 *)skb->data);
921 struct hci_cp_write_def_err_data_reporting *cp;
923 BT_DBG("%s status 0x%2.2x", hdev->name, status);
928 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
932 hdev->err_data_reporting = cp->err_data_reporting;
935 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
937 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
938 struct hci_cp_pin_code_reply *cp;
939 struct hci_conn *conn;
941 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
945 if (hci_dev_test_flag(hdev, HCI_MGMT))
946 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
951 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
955 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
957 conn->pin_length = cp->pin_len;
960 hci_dev_unlock(hdev);
963 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
965 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
967 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
971 if (hci_dev_test_flag(hdev, HCI_MGMT))
972 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
975 hci_dev_unlock(hdev);
978 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
981 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
983 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
988 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
989 hdev->le_pkts = rp->le_max_pkt;
991 hdev->le_cnt = hdev->le_pkts;
993 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
996 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
999 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1001 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1006 memcpy(hdev->le_features, rp->features, 8);
1009 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1010 struct sk_buff *skb)
1012 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1014 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1019 hdev->adv_tx_power = rp->tx_power;
1022 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1024 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1026 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1030 if (hci_dev_test_flag(hdev, HCI_MGMT))
1031 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1034 hci_dev_unlock(hdev);
1037 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1038 struct sk_buff *skb)
1040 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1042 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1046 if (hci_dev_test_flag(hdev, HCI_MGMT))
1047 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1048 ACL_LINK, 0, rp->status);
1050 hci_dev_unlock(hdev);
1053 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1055 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1057 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1061 if (hci_dev_test_flag(hdev, HCI_MGMT))
1062 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1065 hci_dev_unlock(hdev);
1068 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1069 struct sk_buff *skb)
1071 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1073 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1077 if (hci_dev_test_flag(hdev, HCI_MGMT))
1078 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1079 ACL_LINK, 0, rp->status);
1081 hci_dev_unlock(hdev);
1084 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1085 struct sk_buff *skb)
1087 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1089 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1092 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1093 struct sk_buff *skb)
1095 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1097 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1100 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1102 __u8 status = *((__u8 *) skb->data);
1105 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1110 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1116 bacpy(&hdev->random_addr, sent);
1118 hci_dev_unlock(hdev);
1121 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1123 __u8 status = *((__u8 *) skb->data);
1124 struct hci_cp_le_set_default_phy *cp;
1126 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1131 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1137 hdev->le_tx_def_phys = cp->tx_phys;
1138 hdev->le_rx_def_phys = cp->rx_phys;
1140 hci_dev_unlock(hdev);
1143 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1144 struct sk_buff *skb)
1146 __u8 status = *((__u8 *) skb->data);
1147 struct hci_cp_le_set_adv_set_rand_addr *cp;
1148 struct adv_info *adv_instance;
1153 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1159 if (!hdev->cur_adv_instance) {
1160 /* Store in hdev for instance 0 (Set adv and Directed advs) */
1161 bacpy(&hdev->random_addr, &cp->bdaddr);
1163 adv_instance = hci_find_adv_instance(hdev,
1164 hdev->cur_adv_instance);
1166 bacpy(&adv_instance->random_addr, &cp->bdaddr);
1169 hci_dev_unlock(hdev);
1172 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1174 __u8 *sent, status = *((__u8 *) skb->data);
1176 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1181 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1187 /* If we're doing connection initiation as peripheral. Set a
1188 * timeout in case something goes wrong.
1191 struct hci_conn *conn;
1193 hci_dev_set_flag(hdev, HCI_LE_ADV);
1195 conn = hci_lookup_le_connect(hdev);
1197 queue_delayed_work(hdev->workqueue,
1198 &conn->le_conn_timeout,
1199 conn->conn_timeout);
1201 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1204 hci_dev_unlock(hdev);
1207 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1208 struct sk_buff *skb)
1210 struct hci_cp_le_set_ext_adv_enable *cp;
1211 __u8 status = *((__u8 *) skb->data);
1213 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1218 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1225 struct hci_conn *conn;
1227 hci_dev_set_flag(hdev, HCI_LE_ADV);
1229 conn = hci_lookup_le_connect(hdev);
1231 queue_delayed_work(hdev->workqueue,
1232 &conn->le_conn_timeout,
1233 conn->conn_timeout);
1235 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1238 hci_dev_unlock(hdev);
1241 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1243 struct hci_cp_le_set_scan_param *cp;
1244 __u8 status = *((__u8 *) skb->data);
1246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1251 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1257 hdev->le_scan_type = cp->type;
1259 hci_dev_unlock(hdev);
1262 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1263 struct sk_buff *skb)
1265 struct hci_cp_le_set_ext_scan_params *cp;
1266 __u8 status = *((__u8 *) skb->data);
1267 struct hci_cp_le_scan_phy_params *phy_param;
1269 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1274 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1278 phy_param = (void *)cp->data;
1282 hdev->le_scan_type = phy_param->type;
1284 hci_dev_unlock(hdev);
1287 static bool has_pending_adv_report(struct hci_dev *hdev)
1289 struct discovery_state *d = &hdev->discovery;
1291 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1294 static void clear_pending_adv_report(struct hci_dev *hdev)
1296 struct discovery_state *d = &hdev->discovery;
1298 bacpy(&d->last_adv_addr, BDADDR_ANY);
1299 d->last_adv_data_len = 0;
1302 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1303 u8 bdaddr_type, s8 rssi, u32 flags,
1306 struct discovery_state *d = &hdev->discovery;
1308 bacpy(&d->last_adv_addr, bdaddr);
1309 d->last_adv_addr_type = bdaddr_type;
1310 d->last_adv_rssi = rssi;
1311 d->last_adv_flags = flags;
1312 memcpy(d->last_adv_data, data, len);
1313 d->last_adv_data_len = len;
1316 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1321 case LE_SCAN_ENABLE:
1322 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1323 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1324 clear_pending_adv_report(hdev);
1327 case LE_SCAN_DISABLE:
1328 /* We do this here instead of when setting DISCOVERY_STOPPED
1329 * since the latter would potentially require waiting for
1330 * inquiry to stop too.
1332 if (has_pending_adv_report(hdev)) {
1333 struct discovery_state *d = &hdev->discovery;
1335 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1336 d->last_adv_addr_type, NULL,
1337 d->last_adv_rssi, d->last_adv_flags,
1339 d->last_adv_data_len, NULL, 0);
1342 /* Cancel this timer so that we don't try to disable scanning
1343 * when it's already disabled.
1345 cancel_delayed_work(&hdev->le_scan_disable);
1347 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1349 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1350 * interrupted scanning due to a connect request. Mark
1351 * therefore discovery as stopped. If this was not
1352 * because of a connect request advertising might have
1353 * been disabled because of active scanning, so
1354 * re-enable it again if necessary.
1356 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1357 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1358 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1359 hdev->discovery.state == DISCOVERY_FINDING)
1360 hci_req_reenable_advertising(hdev);
1365 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1370 hci_dev_unlock(hdev);
1373 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1374 struct sk_buff *skb)
1376 struct hci_cp_le_set_scan_enable *cp;
1377 __u8 status = *((__u8 *) skb->data);
1379 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1384 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1388 le_set_scan_enable_complete(hdev, cp->enable);
1391 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1392 struct sk_buff *skb)
1394 struct hci_cp_le_set_ext_scan_enable *cp;
1395 __u8 status = *((__u8 *) skb->data);
1397 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1402 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1406 le_set_scan_enable_complete(hdev, cp->enable);
1409 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1410 struct sk_buff *skb)
1412 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1414 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1420 hdev->le_num_of_adv_sets = rp->num_of_sets;
1423 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1424 struct sk_buff *skb)
1426 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1428 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1433 hdev->le_white_list_size = rp->size;
1436 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1437 struct sk_buff *skb)
1439 __u8 status = *((__u8 *) skb->data);
1441 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1446 hci_bdaddr_list_clear(&hdev->le_white_list);
1449 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1450 struct sk_buff *skb)
1452 struct hci_cp_le_add_to_white_list *sent;
1453 __u8 status = *((__u8 *) skb->data);
1455 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1460 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1464 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1468 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1469 struct sk_buff *skb)
1471 struct hci_cp_le_del_from_white_list *sent;
1472 __u8 status = *((__u8 *) skb->data);
1474 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1479 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1483 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1487 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1488 struct sk_buff *skb)
1490 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1492 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1497 memcpy(hdev->le_states, rp->le_states, 8);
1500 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1501 struct sk_buff *skb)
1503 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1505 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1510 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1511 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1514 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1515 struct sk_buff *skb)
1517 struct hci_cp_le_write_def_data_len *sent;
1518 __u8 status = *((__u8 *) skb->data);
1520 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1525 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1529 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1530 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Command Complete handler for HCI_OP_LE_ADD_TO_RESOLV_LIST: mirror the
 * (address, IRK) entry we pushed to the controller's resolving list into
 * the host-side le_resolv_list. */
1533 static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1534 struct sk_buff *skb)
1536 struct hci_cp_le_add_to_resolv_list *sent;
1537 __u8 status = *((__u8 *) skb->data);
1539 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1544 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1548 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1549 sent->bdaddr_type, sent->peer_irk,
/* Command Complete handler for HCI_OP_LE_DEL_FROM_RESOLV_LIST: drop the
 * removed entry from the host-side le_resolv_list mirror. */
1553 static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1554 struct sk_buff *skb)
1556 struct hci_cp_le_del_from_resolv_list *sent;
1557 __u8 status = *((__u8 *) skb->data);
1559 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1564 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1568 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
/* Command Complete handler for LE Clear Resolving List: empty the
 * host-side le_resolv_list to match the controller. */
1572 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1573 struct sk_buff *skb)
1575 __u8 status = *((__u8 *) skb->data);
1577 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1582 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete handler for LE Read Resolving List Size: record how many
 * resolving-list entries the controller can hold. */
1585 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1586 struct sk_buff *skb)
1588 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1590 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1595 hdev->le_resolv_list_size = rp->size;
/* Command Complete handler for LE Set Address Resolution Enable: track the
 * controller's link-layer RPA resolution state via the HCI_LL_RPA_RESOLUTION
 * flag (set when enabled, cleared when disabled), under the dev lock.
 * NOTE(review): the matching hci_dev_lock() and the enable/disable branch
 * condition are not visible in this excerpt. */
1598 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1599 struct sk_buff *skb)
1601 __u8 *sent, status = *((__u8 *) skb->data);
1603 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1608 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1615 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1617 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1619 hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Maximum Data Length: cache the
 * controller's maximum TX/RX octet counts and times for data length
 * extension. */
1622 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1623 struct sk_buff *skb)
1625 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1627 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1632 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1633 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1634 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1635 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Command Complete handler for Write LE Host Supported: update the cached
 * host feature bits (features[1][0]) and the HCI_LE_ENABLED flag to match
 * what we asked the controller to set. Disabling LE also clears
 * HCI_ADVERTISING. LMP_HOST_LE_BREDR tracks simultaneous LE+BR/EDR support.
 * NOTE(review): the sent->le / sent->simul branch conditions and the
 * hci_dev_lock() are not visible in this excerpt. */
1638 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1639 struct sk_buff *skb)
1641 struct hci_cp_write_le_host_supported *sent;
1642 __u8 status = *((__u8 *) skb->data);
1644 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1649 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1656 hdev->features[1][0] |= LMP_HOST_LE;
1657 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1659 hdev->features[1][0] &= ~LMP_HOST_LE;
1660 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1661 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1665 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1667 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1669 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Advertising Parameters: remember which
 * own-address type the controller will advertise with, so later events can
 * be matched to the right local address. */
1672 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1674 struct hci_cp_le_set_adv_param *cp;
1675 u8 status = *((u8 *) skb->data);
1677 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1682 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1687 hdev->adv_addr_type = cp->own_address_type;
1688 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Extended Advertising Parameters:
 * record the own-address type and the selected TX power — on hdev itself for
 * instance 0, otherwise on the matching adv_info instance — then refresh the
 * advertising data now that the TX power is known.
 * NOTE(review): the else-branch structure and NULL check on adv_instance are
 * not visible in this excerpt. */
1691 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1693 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1694 struct hci_cp_le_set_ext_adv_params *cp;
1695 struct adv_info *adv_instance;
1697 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1702 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1707 hdev->adv_addr_type = cp->own_addr_type;
1708 if (!hdev->cur_adv_instance) {
1709 /* Store in hdev for instance 0 */
1710 hdev->adv_tx_power = rp->tx_power;
1712 adv_instance = hci_find_adv_instance(hdev,
1713 hdev->cur_adv_instance);
1715 adv_instance->tx_power = rp->tx_power;
1717 /* Update adv data as tx power is known now */
1718 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1719 hci_dev_unlock(hdev);
/* Command Complete handler for Read RSSI: look up the connection by handle
 * and store the reported RSSI on it. */
1722 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1724 struct hci_rp_read_rssi *rp = (void *) skb->data;
1725 struct hci_conn *conn;
1727 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1734 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1736 conn->rssi = rp->rssi;
1738 hci_dev_unlock(hdev);
/* Command Complete handler for Read Transmit Power Level: the sent command's
 * `type` field selects whether the reply is the current or the maximum TX
 * power, so store it in conn->tx_power or conn->max_tx_power accordingly.
 * NOTE(review): the case labels of the switch are not visible in this
 * excerpt. */
1741 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1743 struct hci_cp_read_tx_power *sent;
1744 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1745 struct hci_conn *conn;
1747 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1752 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1758 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1762 switch (sent->type) {
1764 conn->tx_power = rp->tx_power;
1767 conn->max_tx_power = rp->tx_power;
1772 hci_dev_unlock(hdev);
/* Command Complete handler for Write SSP Debug Mode: cache the debug mode
 * we requested in hdev->ssp_debug_mode. */
1775 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1777 u8 status = *((u8 *) skb->data);
1780 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1785 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1787 hdev->ssp_debug_mode = *mode;
/* Command Status handler for Inquiry: re-check pending connection attempts
 * and, when the inquiry was accepted, mark HCI_INQUIRY as active.
 * NOTE(review): the status-based branching is not visible in this excerpt. */
1790 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1792 BT_DBG("%s status 0x%2.2x", hdev->name, status)
1795 hci_conn_check_pending(hdev);
1799 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status handler for Create Connection. On failure of an outgoing
 * BT_CONNECT attempt, close the connection and notify via hci_connect_cfm()
 * — except for status 0x0c with few attempts, where the connection is parked
 * in BT_CONNECT2 (presumably to retry; confirm against the full file). On
 * success with no existing conn object, create one for the pending link. */
1802 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1804 struct hci_cp_create_conn *cp;
1805 struct hci_conn *conn;
1807 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1809 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1815 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1817 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1820 if (conn && conn->state == BT_CONNECT) {
1821 if (status != 0x0c || conn->attempt > 2) {
1822 conn->state = BT_CLOSED;
1823 hci_connect_cfm(conn, status);
1826 conn->state = BT_CONNECT2;
1830 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1833 bt_dev_err(hdev, "no memory for new connection");
1837 hci_dev_unlock(hdev);
/* Command Status handler for Add SCO Connection: on failure, find the ACL
 * link the SCO was to ride on, mark the attached SCO connection closed and
 * report the failure via hci_connect_cfm().
 * NOTE(review): the acl->link/sco lookup lines are not visible in this
 * excerpt. */
1840 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1842 struct hci_cp_add_sco *cp;
1843 struct hci_conn *acl, *sco;
1846 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1851 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1855 handle = __le16_to_cpu(cp->handle);
1857 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1861 acl = hci_conn_hash_lookup_handle(hdev, handle);
1865 sco->state = BT_CLOSED;
1867 hci_connect_cfm(sco, status);
1872 hci_dev_unlock(hdev);
/* Command Status handler for Authentication Requested: on failure, if the
 * connection is still being configured (BT_CONFIG), complete the connect
 * attempt with the error and drop the reference taken for it. */
1875 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1877 struct hci_cp_auth_requested *cp;
1878 struct hci_conn *conn;
1880 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1885 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1891 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1893 if (conn->state == BT_CONFIG) {
1894 hci_connect_cfm(conn, status);
1895 hci_conn_drop(conn);
1899 hci_dev_unlock(hdev);
/* Command Status handler for Set Connection Encryption: mirrors
 * hci_cs_auth_requested() — on failure during BT_CONFIG, fail the connect
 * and drop the connection reference. */
1902 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1904 struct hci_cp_set_conn_encrypt *cp;
1905 struct hci_conn *conn;
1907 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1912 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1918 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1920 if (conn->state == BT_CONFIG) {
1921 hci_connect_cfm(conn, status);
1922 hci_conn_drop(conn);
1926 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still needs authentication before
 * use. Only outgoing links in BT_CONFIG qualify; SDP-level security never
 * requires it. For non-SSP links without the MITM bit (auth_type & 0x01),
 * authentication is only requested for MEDIUM/HIGH/FIPS security levels.
 * NOTE(review): the explicit `return 0/1` lines are not visible in this
 * excerpt. */
1929 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1930 struct hci_conn *conn)
1932 if (conn->state != BT_CONFIG || !conn->out)
1935 if (conn->pending_sec_level == BT_SECURITY_SDP)
1938 /* Only request authentication for SSP connections or non-SSP
1939 * devices with sec_level MEDIUM or HIGH or if MITM protection
1942 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1943 conn->pending_sec_level != BT_SECURITY_FIPS &&
1944 conn->pending_sec_level != BT_SECURITY_HIGH &&
1945 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for an inquiry-cache entry, reusing the page
 * scan parameters and clock offset discovered during inquiry. Returns the
 * result of hci_send_cmd() (0 on success). */
1951 static int hci_resolve_name(struct hci_dev *hdev,
1952 struct inquiry_entry *e)
1954 struct hci_cp_remote_name_req cp;
1956 memset(&cp, 0, sizeof(cp));
1958 bacpy(&cp.bdaddr, &e->data.bdaddr);
1959 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1960 cp.pscan_mode = e->data.pscan_mode;
1961 cp.clock_offset = e->data.clock_offset;
1963 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next discovered device whose name is
 * still needed. Returns true when a request was successfully started (the
 * entry moves to NAME_PENDING), false when the resolve list is empty or the
 * request could not be sent.
 * NOTE(review): the explicit return statements are not visible in this
 * excerpt. */
1966 static bool hci_resolve_next_name(struct hci_dev *hdev)
1968 struct discovery_state *discov = &hdev->discovery;
1969 struct inquiry_entry *e;
1971 if (list_empty(&discov->resolve))
1974 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1978 if (hci_resolve_name(hdev, e) == 0) {
1979 e->name_state = NAME_PENDING;
/* Handle the result of a remote-name lookup for bdaddr. First, report the
 * device as mgmt-connected if a genuinely connected conn object exists and
 * it has not been reported yet. Then drive the discovery state machine:
 * nothing to do when stopped; finish up when stopping; otherwise (while
 * RESOLVING) mark the matching cache entry NAME_KNOWN/NAME_NOT_KNOWN,
 * forward the name via mgmt_remote_name(), and either continue with the
 * next pending name or mark discovery DISCOVERY_STOPPED.
 * NOTE(review): several guard lines (conn NULL-check, `if (!e)`, goto
 * labels) are not visible in this excerpt. */
1986 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1987 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1989 struct discovery_state *discov = &hdev->discovery;
1990 struct inquiry_entry *e;
1992 /* Update the mgmt connected state if necessary. Be careful with
1993 * conn objects that exist but are not (yet) connected however.
1994 * Only those in BT_CONFIG or BT_CONNECTED states can be
1995 * considered connected.
1998 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1999 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2000 mgmt_device_connected(hdev, conn, 0, name, name_len);
2002 if (discov->state == DISCOVERY_STOPPED)
2005 if (discov->state == DISCOVERY_STOPPING)
2006 goto discov_complete;
2008 if (discov->state != DISCOVERY_RESOLVING)
2011 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2012 /* If the device was not found in a list of found devices names of which
2013 * are pending. there is no need to continue resolving a next name as it
2014 * will be done upon receiving another Remote Name Request Complete
2021 e->name_state = NAME_KNOWN;
2022 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2023 e->data.rssi, name, name_len);
2025 e->name_state = NAME_NOT_KNOWN;
2028 if (hci_resolve_next_name(hdev))
2032 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for Remote Name Request. On success we simply wait
 * for the Remote Name Request Complete event. On failure, let the mgmt layer
 * resolve the pending-name bookkeeping, and — if the connection is outgoing
 * and still needs authentication — fall back to sending Authentication
 * Requested directly (name lookup and auth share the same pre-connection
 * window). HCI_CONN_AUTH_PEND guards against duplicate auth requests. */
2035 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2037 struct hci_cp_remote_name_req *cp;
2038 struct hci_conn *conn;
2040 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2042 /* If successful wait for the name req complete event before
2043 * checking for the need to do authentication */
2047 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2053 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2055 if (hci_dev_test_flag(hdev, HCI_MGMT))
2056 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2061 if (!hci_outgoing_auth_needed(hdev, conn))
2064 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2065 struct hci_cp_auth_requested auth_cp;
2067 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2069 auth_cp.handle = __cpu_to_le16(conn->handle);
2070 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2071 sizeof(auth_cp), &auth_cp);
2075 hci_dev_unlock(hdev);
/* Command Status handler for Read Remote Supported Features: on failure
 * during BT_CONFIG, abort the connection setup with the error status. */
2078 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2080 struct hci_cp_read_remote_features *cp;
2081 struct hci_conn *conn;
2083 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2088 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2094 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2096 if (conn->state == BT_CONFIG) {
2097 hci_connect_cfm(conn, status);
2098 hci_conn_drop(conn);
2102 hci_dev_unlock(hdev);
/* Command Status handler for Read Remote Extended Features: same failure
 * handling as hci_cs_read_remote_features(). */
2105 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2107 struct hci_cp_read_remote_ext_features *cp;
2108 struct hci_conn *conn;
2110 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2115 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2121 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2123 if (conn->state == BT_CONFIG) {
2124 hci_connect_cfm(conn, status);
2125 hci_conn_drop(conn);
2129 hci_dev_unlock(hdev);
/* Command Status handler for Setup Synchronous Connection: on failure, look
 * up the ACL by handle and close/report the SCO connection attached to it,
 * mirroring hci_cs_add_sco().
 * NOTE(review): the acl->link/sco dereference lines are not visible in this
 * excerpt. */
2132 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2134 struct hci_cp_setup_sync_conn *cp;
2135 struct hci_conn *acl, *sco;
2138 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2143 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2147 handle = __le16_to_cpu(cp->handle);
2149 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2153 acl = hci_conn_hash_lookup_handle(hdev, handle);
2157 sco->state = BT_CLOSED;
2159 hci_connect_cfm(sco, status);
2164 hci_dev_unlock(hdev);
/* Command Status handler for Sniff Mode: on failure, clear the pending
 * mode-change flag and, if a SCO setup was waiting on the mode change,
 * run it now with the error status. */
2167 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2169 struct hci_cp_sniff_mode *cp;
2170 struct hci_conn *conn;
2172 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2177 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2183 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2185 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2187 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2188 hci_sco_setup(conn, status);
2191 hci_dev_unlock(hdev);
/* Command Status handler for Exit Sniff Mode: identical failure handling to
 * hci_cs_sniff_mode(). */
2194 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2196 struct hci_cp_exit_sniff_mode *cp;
2197 struct hci_conn *conn;
2199 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2204 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2210 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2212 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2214 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2215 hci_sco_setup(conn, status);
2218 hci_dev_unlock(hdev);
/* Command Status handler for Disconnect: on failure, tell mgmt that the
 * disconnect did not happen, then do minimal cleanup — since the upper
 * layers will not retry, re-enable advertising for LE links that may have
 * suppressed it. */
2221 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2223 struct hci_cp_disconnect *cp;
2224 struct hci_conn *conn;
2229 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2235 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2237 u8 type = conn->type;
2239 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2240 conn->dst_type, status);
2242 /* If the disconnection failed for any reason, the upper layer
2243 * does not retry to disconnect in current implementation.
2244 * Hence, we need to do some basic cleanup here and re-enable
2245 * advertising if necessary.
2248 if (type == LE_LINK)
2249 hci_req_reenable_advertising(hdev);
2252 hci_dev_unlock(hdev);
/* Common helper for the LE Create Connection command-status handlers: find
 * the pending LE connection and record the initiator/responder address info
 * that SMP will later need (our own address depends on own_address_type;
 * the peer's comes straight from the command). Because LE has no page
 * timeout, arm le_conn_timeout for direct (non-white-list) connections so
 * the attempt cannot hang forever. */
2255 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2256 u8 peer_addr_type, u8 own_address_type,
2259 struct hci_conn *conn;
2261 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2266 /* Store the initiator and responder address information which
2267 * is needed for SMP. These values will not change during the
2268 * lifetime of the connection.
2270 conn->init_addr_type = own_address_type;
2271 if (own_address_type == ADDR_LE_DEV_RANDOM)
2272 bacpy(&conn->init_addr, &hdev->random_addr);
2274 bacpy(&conn->init_addr, &hdev->bdaddr);
2276 conn->resp_addr_type = peer_addr_type;
2277 bacpy(&conn->resp_addr, peer_addr);
2279 /* We don't want the connection attempt to stick around
2280 * indefinitely since LE doesn't have a page timeout concept
2281 * like BR/EDR. Set a timer for any connection that doesn't use
2282 * the white list for connecting.
2284 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2285 queue_delayed_work(conn->hdev->workqueue,
2286 &conn->le_conn_timeout,
2287 conn->conn_timeout);
/* Command Status handler for LE Create Connection: failures are fully
 * handled by hci_le_conn_failed() via the request completion callbacks, so
 * here we only record address/timeout state through cs_le_create_conn(). */
2290 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2292 struct hci_cp_le_create_conn *cp;
2294 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2296 /* All connection failure handling is taken care of by the
2297 * hci_le_conn_failed function which is triggered by the HCI
2298 * request completion callbacks used for connecting.
2303 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2309 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2310 cp->own_address_type, cp->filter_policy);
2312 hci_dev_unlock(hdev);
/* Command Status handler for LE Extended Create Connection: identical to
 * hci_cs_le_create_conn() but parses the extended command layout (note the
 * field is own_addr_type here, not own_address_type). */
2315 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2317 struct hci_cp_le_ext_create_conn *cp;
2319 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2321 /* All connection failure handling is taken care of by the
2322 * hci_le_conn_failed function which is triggered by the HCI
2323 * request completion callbacks used for connecting.
2328 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2334 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2335 cp->own_addr_type, cp->filter_policy);
2337 hci_dev_unlock(hdev);
/* Command Status handler for LE Read Remote Features: on failure during
 * BT_CONFIG, abort the connection setup with the error status. */
2340 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2342 struct hci_cp_le_read_remote_features *cp;
2343 struct hci_conn *conn;
2345 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2350 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2356 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2358 if (conn->state == BT_CONFIG) {
2359 hci_connect_cfm(conn, status);
2360 hci_conn_drop(conn);
2364 hci_dev_unlock(hdev);
/* Command Status handler for LE Start Encryption: if the command failed on
 * an established link, the link cannot stay up unencrypted — disconnect
 * with HCI_ERROR_AUTH_FAILURE and drop the reference. Links no longer in
 * BT_CONNECTED are left alone. */
2367 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2369 struct hci_cp_le_start_enc *cp;
2370 struct hci_conn *conn;
2372 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2379 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2383 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2387 if (conn->state != BT_CONNECTED)
2390 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2391 hci_conn_drop(conn);
2394 hci_dev_unlock(hdev);
/* Command Status handler for Switch Role: on failure, clear the pending
 * role-switch flag so a later switch can be attempted. */
2397 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2399 struct hci_cp_switch_role *cp;
2400 struct hci_conn *conn;
2402 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2407 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2413 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2415 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2417 hci_dev_unlock(hdev);
/* Inquiry Complete event handler. Clears HCI_INQUIRY (waking any waiters on
 * that bit) and, when mgmt is in control and discovery was FINDING, either
 * starts resolving outstanding device names (moving to DISCOVERY_RESOLVING)
 * or declares discovery stopped. With the simultaneous-discovery quirk, the
 * stop is deferred while an LE scan is still running so the LE side can
 * finish first. */
2420 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2422 __u8 status = *((__u8 *) skb->data);
2423 struct discovery_state *discov = &hdev->discovery;
2424 struct inquiry_entry *e;
2426 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2428 hci_conn_check_pending(hdev);
2430 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2433 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2434 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2436 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2441 if (discov->state != DISCOVERY_FINDING)
2444 if (list_empty(&discov->resolve)) {
2445 /* When BR/EDR inquiry is active and no LE scanning is in
2446 * progress, then change discovery state to indicate completion.
2448 * When running LE scanning and BR/EDR inquiry simultaneously
2449 * and the LE scan already finished, then change the discovery
2450 * state to indicate completion.
2452 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2453 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2454 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2458 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2459 if (e && hci_resolve_name(hdev, e) == 0) {
2460 e->name_state = NAME_PENDING;
2461 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2463 /* When BR/EDR inquiry is active and no LE scanning is in
2464 * progress, then change discovery state to indicate completion.
2466 * When running LE scanning and BR/EDR inquiry simultaneously
2467 * and the LE scan already finished, then change the discovery
2468 * state to indicate completion.
2470 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2471 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2472 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2476 hci_dev_unlock(hdev);
/* Inquiry Result event handler (no RSSI variant). Skips processing during
 * periodic inquiry; otherwise walks the num_rsp responses, copies each into
 * an inquiry_data record (RSSI marked invalid, SSP off for this legacy
 * event), updates the inquiry cache, and reports the device to mgmt. */
2479 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2481 struct inquiry_data data;
2482 struct inquiry_info *info = (void *) (skb->data + 1);
2483 int num_rsp = *((__u8 *) skb->data);
2485 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2490 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2495 for (; num_rsp; num_rsp--, info++) {
2498 bacpy(&data.bdaddr, &info->bdaddr);
2499 data.pscan_rep_mode = info->pscan_rep_mode;
2500 data.pscan_period_mode = info->pscan_period_mode;
2501 data.pscan_mode = info->pscan_mode;
2502 memcpy(data.dev_class, info->dev_class, 3);
2503 data.clock_offset = info->clock_offset;
2504 data.rssi = HCI_RSSI_INVALID;
2505 data.ssp_mode = 0x00;
2507 flags = hci_inquiry_cache_update(hdev, &data, false);
2509 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2510 info->dev_class, HCI_RSSI_INVALID,
2511 flags, NULL, 0, NULL, 0);
2514 hci_dev_unlock(hdev);
/* Connection Complete event handler. Finds (or, for auto-connected ACL
 * peers already in the inquiry cache, creates) the hci_conn; SCO completes
 * may also match a conn created as ESCO, which is retyped to SCO. On
 * success: store the handle; ACL links go to BT_CONFIG with a hold, kick
 * off Read Remote Features, update scanning, and — pre-2.0 incoming links —
 * fix the packet type; non-ACL links go straight to BT_CONNECTED with
 * debugfs/sysfs entries and AUTH/ENCRYPT flags inherited from hdev. On
 * failure: close the conn and report via mgmt for ACL. Finally run any
 * deferred SCO setup and confirm the connection to the upper layers.
 * NOTE(review): many guard/else lines (status checks, NULL checks, goto
 * unlock) are not visible in this excerpt. */
2517 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2519 struct hci_ev_conn_complete *ev = (void *) skb->data;
2520 struct inquiry_entry *ie;
2521 struct hci_conn *conn;
2523 BT_DBG("%s", hdev->name);
2527 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2529 /* Connection may not exist if auto-connected. Check the inquiry
2530 * cache to see if we've already discovered this bdaddr before.
2531 * If found and link is an ACL type, create a connection class
2534 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2535 if (ie && ev->link_type == ACL_LINK) {
2536 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2539 bt_dev_err(hdev, "no memory for new conn");
2543 if (ev->link_type != SCO_LINK)
2546 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2551 conn->type = SCO_LINK;
2556 conn->handle = __le16_to_cpu(ev->handle);
2558 if (conn->type == ACL_LINK) {
2559 conn->state = BT_CONFIG;
2560 hci_conn_hold(conn);
2562 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2563 !hci_find_link_key(hdev, &ev->bdaddr))
2564 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2566 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2568 conn->state = BT_CONNECTED;
2570 hci_debugfs_create_conn(conn);
2571 hci_conn_add_sysfs(conn);
2573 if (test_bit(HCI_AUTH, &hdev->flags))
2574 set_bit(HCI_CONN_AUTH, &conn->flags);
2576 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2577 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2579 /* Get remote features */
2580 if (conn->type == ACL_LINK) {
2581 struct hci_cp_read_remote_features cp;
2582 cp.handle = ev->handle;
2583 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2586 hci_req_update_scan(hdev);
2589 /* Set packet type for incoming connection */
2590 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2591 struct hci_cp_change_conn_ptype cp;
2592 cp.handle = ev->handle;
2593 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2594 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2598 conn->state = BT_CLOSED;
2599 if (conn->type == ACL_LINK)
2600 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2601 conn->dst_type, ev->status);
2604 if (conn->type == ACL_LINK)
2605 hci_sco_setup(conn, ev->status);
2608 hci_connect_cfm(conn, ev->status);
2610 } else if (ev->link_type != ACL_LINK)
2611 hci_connect_cfm(conn, ev->status);
2614 hci_dev_unlock(hdev);
2616 hci_conn_check_pending(hdev);
/* Reject an incoming connection request from bdaddr with reason
 * HCI_ERROR_REJ_BAD_ADDR. */
2619 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2621 struct hci_cp_reject_conn_req cp;
2623 bacpy(&cp.bdaddr, bdaddr);
2624 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2625 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event handler. Rejects the request when the protocol
 * layer does not accept it, when the peer is blacklisted, or when mgmt is
 * active but the device is neither connectable nor whitelisted. Otherwise it
 * updates the inquiry-cache device class, finds or creates the hci_conn,
 * and accepts: ACL requests (or SCO on non-eSCO controllers that don't
 * defer) via Accept Connection Request — becoming master if role switch is
 * possible — sync requests via Accept Synchronous Connection Request with
 * default bandwidth/latency/voice settings, or defers to the upper layer
 * (BT_CONNECT2 + hci_connect_cfm) when HCI_PROTO_DEFER is set.
 * NOTE(review): the `flags` declaration, some NULL checks and the
 * hci_dev_lock() are not visible in this excerpt. */
2628 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2630 struct hci_ev_conn_request *ev = (void *) skb->data;
2631 int mask = hdev->link_mode;
2632 struct inquiry_entry *ie;
2633 struct hci_conn *conn;
2636 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2639 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2642 if (!(mask & HCI_LM_ACCEPT)) {
2643 hci_reject_conn(hdev, &ev->bdaddr);
2647 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2649 hci_reject_conn(hdev, &ev->bdaddr);
2653 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2654 * connection. These features are only touched through mgmt so
2655 * only do the checks if HCI_MGMT is set.
2657 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2658 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2659 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2661 hci_reject_conn(hdev, &ev->bdaddr);
2665 /* Connection accepted */
2669 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2671 memcpy(ie->data.dev_class, ev->dev_class, 3);
2673 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2676 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2679 bt_dev_err(hdev, "no memory for new connection");
2680 hci_dev_unlock(hdev);
2685 memcpy(conn->dev_class, ev->dev_class, 3);
2687 hci_dev_unlock(hdev);
2689 if (ev->link_type == ACL_LINK ||
2690 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2691 struct hci_cp_accept_conn_req cp;
2692 conn->state = BT_CONNECT;
2694 bacpy(&cp.bdaddr, &ev->bdaddr);
2696 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2697 cp.role = 0x00; /* Become master */
2699 cp.role = 0x01; /* Remain slave */
2701 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2702 } else if (!(flags & HCI_PROTO_DEFER)) {
2703 struct hci_cp_accept_sync_conn_req cp;
2704 conn->state = BT_CONNECT;
2706 bacpy(&cp.bdaddr, &ev->bdaddr);
2707 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2709 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2710 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2711 cp.max_latency = cpu_to_le16(0xffff);
2712 cp.content_format = cpu_to_le16(hdev->voice_setting);
2713 cp.retrans_effort = 0xff;
2715 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2718 conn->state = BT_CONNECT2;
2719 hci_connect_cfm(conn, 0);
/* Map an HCI disconnect error code onto the coarser mgmt disconnect reason
 * reported to userspace; anything unrecognized becomes UNKNOWN. */
2723 static u8 hci_to_mgmt_reason(u8 err)
2726 case HCI_ERROR_CONNECTION_TIMEOUT:
2727 return MGMT_DEV_DISCONN_TIMEOUT;
2728 case HCI_ERROR_REMOTE_USER_TERM:
2729 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2730 case HCI_ERROR_REMOTE_POWER_OFF:
2731 return MGMT_DEV_DISCONN_REMOTE;
2732 case HCI_ERROR_LOCAL_HOST_TERM:
2733 return MGMT_DEV_DISCONN_LOCAL_HOST;
2735 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event handler. On error status, report a failed
 * disconnect to mgmt. Otherwise close the conn and notify mgmt with a
 * reason derived from the HCI reason (AUTH_FAILURE wins if that flag was
 * set). ACL cleanup: optionally flush the link key and refresh scanning;
 * LINK_LOSS auto-connect params only re-arm on a real connection timeout,
 * while DIRECT/ALWAYS params are moved to pend_le_conns and background
 * scanning is restarted. After hci_disconn_cfm(), wake the suspend waiter
 * when the last connection is gone, and re-enable advertising for LE links
 * per the Core Spec note below.
 * NOTE(review): `type`/`reason` declarations and several guards are not
 * visible in this excerpt. */
2739 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2741 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2743 struct hci_conn_params *params;
2744 struct hci_conn *conn;
2745 bool mgmt_connected;
2748 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2752 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2757 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2758 conn->dst_type, ev->status);
2762 conn->state = BT_CLOSED;
2764 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2766 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2767 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2769 reason = hci_to_mgmt_reason(ev->reason);
2771 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2772 reason, mgmt_connected);
2774 if (conn->type == ACL_LINK) {
2775 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2776 hci_remove_link_key(hdev, &conn->dst);
2778 hci_req_update_scan(hdev);
2781 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2783 switch (params->auto_connect) {
2784 case HCI_AUTO_CONN_LINK_LOSS:
2785 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2789 case HCI_AUTO_CONN_DIRECT:
2790 case HCI_AUTO_CONN_ALWAYS:
2791 list_del_init(&params->action);
2792 list_add(&params->action, &hdev->pend_le_conns);
2793 hci_update_background_scan(hdev);
2803 hci_disconn_cfm(conn, ev->reason);
2806 /* The suspend notifier is waiting for all devices to disconnect so
2807 * clear the bit from pending tasks and inform the wait queue.
2809 if (list_empty(&hdev->conn_hash.list) &&
2810 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
2811 wake_up(&hdev->suspend_wait_q);
2814 /* Re-enable advertising if necessary, since it might
2815 * have been disabled by the connection. From the
2816 * HCI_LE_Set_Advertise_Enable command description in
2817 * the core specification (v4.0):
2818 * "The Controller shall continue advertising until the Host
2819 * issues an LE_Set_Advertise_Enable command with
2820 * Advertising_Enable set to 0x00 (Advertising is disabled)
2821 * or until a connection is created or until the Advertising
2822 * is timed out due to Directed Advertising."
2824 if (type == LE_LINK)
2825 hci_req_reenable_advertising(hdev);
2828 hci_dev_unlock(hdev);
/* Authentication Complete event handler. On success, clear any recorded
 * auth failure and mark the link authenticated (legacy devices cannot be
 * re-authenticated, hence the info message when REAUTH_PEND is set). On
 * failure, record PIN/key-missing as an auth failure and notify mgmt. Then,
 * for links in BT_CONFIG: SSP success continues setup by enabling
 * encryption; otherwise the connect completes (with hold/timeout handling)
 * and hci_auth_cfm() runs for established links. Finally, if encryption was
 * pending on this auth, either send Set Connection Encryption or report the
 * failure through hci_encrypt_cfm().
 * NOTE(review): several status checks and encrypt-field assignments are not
 * visible in this excerpt. */
2831 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2833 struct hci_ev_auth_complete *ev = (void *) skb->data;
2834 struct hci_conn *conn;
2836 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2840 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2845 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2847 if (!hci_conn_ssp_enabled(conn) &&
2848 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2849 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
2851 set_bit(HCI_CONN_AUTH, &conn->flags);
2852 conn->sec_level = conn->pending_sec_level;
2855 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2856 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2858 mgmt_auth_failed(conn, ev->status);
2861 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2862 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2864 if (conn->state == BT_CONFIG) {
2865 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2866 struct hci_cp_set_conn_encrypt cp;
2867 cp.handle = ev->handle;
2869 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2872 conn->state = BT_CONNECTED;
2873 hci_connect_cfm(conn, ev->status);
2874 hci_conn_drop(conn);
2877 hci_auth_cfm(conn, ev->status);
2879 hci_conn_hold(conn);
2880 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2881 hci_conn_drop(conn);
2884 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2886 struct hci_cp_set_conn_encrypt cp;
2887 cp.handle = ev->handle;
2889 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2892 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2893 hci_encrypt_cfm(conn, ev->status, 0x00);
2898 hci_dev_unlock(hdev);
/* Remote Name Request Complete event handler. Feeds the resolved name (or
 * NULL on failure — name length bounded by HCI_MAX_NAME_LENGTH) into
 * hci_check_pending_name() when mgmt is active, then — like
 * hci_cs_remote_name_req() — starts authentication on the outgoing
 * connection if it is still needed and not already pending. */
2901 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2903 struct hci_ev_remote_name *ev = (void *) skb->data;
2904 struct hci_conn *conn;
2906 BT_DBG("%s", hdev->name);
2908 hci_conn_check_pending(hdev);
2912 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2914 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2917 if (ev->status == 0)
2918 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2919 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2921 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2927 if (!hci_outgoing_auth_needed(hdev, conn))
2930 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2931 struct hci_cp_auth_requested cp;
2933 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2935 cp.handle = __cpu_to_le16(conn->handle);
2936 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2940 hci_dev_unlock(hdev);
/* Completion callback for HCI_OP_READ_ENC_KEY_SIZE. Validates the reply
 * length, looks up the connection by handle, and records the encryption key
 * size — forcing 0 on command failure so upper layers treat the link as
 * having no usable key (fail-secure). Links still in BT_CONFIG are then
 * promoted to BT_CONNECTED with a connect confirmation; established links
 * get an encrypt confirmation whose value reflects the ENCRYPT/AES_CCM
 * flags.
 * NOTE(review): the `handle`/`encrypt` declarations and some status checks
 * are not visible in this excerpt. */
2943 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2944 u16 opcode, struct sk_buff *skb)
2946 const struct hci_rp_read_enc_key_size *rp;
2947 struct hci_conn *conn;
2950 BT_DBG("%s status 0x%02x", hdev->name, status);
2952 if (!skb || skb->len < sizeof(*rp)) {
2953 bt_dev_err(hdev, "invalid read key size response");
2957 rp = (void *)skb->data;
2958 handle = le16_to_cpu(rp->handle);
2962 conn = hci_conn_hash_lookup_handle(hdev, handle);
2966 /* While unexpected, the read_enc_key_size command may fail. The most
2967 * secure approach is to then assume the key size is 0 to force a
2971 bt_dev_err(hdev, "failed to read key size for handle %u",
2973 conn->enc_key_size = 0;
2975 conn->enc_key_size = rp->key_size;
2978 if (conn->state == BT_CONFIG) {
2979 conn->state = BT_CONNECTED;
2980 hci_connect_cfm(conn, 0);
2981 hci_conn_drop(conn);
2985 if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2987 else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
2992 hci_encrypt_cfm(conn, 0, encrypt);
2996 hci_dev_unlock(hdev);
/* HCI Encryption Change event handler.
 *
 * Updates the connection's security flags according to the new encryption
 * state, enforces Secure Connections Only policy, kicks off a key-size
 * read for encrypted ACL links, and programs the authenticated payload
 * timeout where supported.
 *
 * NOTE(review): the listing elides braces, goto labels and some closing
 * statements; annotations below cover only the visible code.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
    struct hci_ev_encrypt_change *ev = (void *) skb->data;
    struct hci_conn *conn;

    BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));

    /* Encryption implies authentication */
    set_bit(HCI_CONN_AUTH, &conn->flags);
    set_bit(HCI_CONN_ENCRYPT, &conn->flags);
    conn->sec_level = conn->pending_sec_level;

    /* P-256 authentication key implies FIPS */
    if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
        set_bit(HCI_CONN_FIPS, &conn->flags);

    /* encrypt value 0x02 on BR/EDR means AES-CCM; LE is always AES-CCM */
    if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
        conn->type == LE_LINK)
        set_bit(HCI_CONN_AES_CCM, &conn->flags);
    /* Encryption-off path (enclosing else branch elided in listing) */
    clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
    clear_bit(HCI_CONN_AES_CCM, &conn->flags);

    /* We should disregard the current RPA and generate a new one
     * whenever the encryption procedure fails.
     */
    if (ev->status && conn->type == LE_LINK) {
        hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
        hci_adv_instances_set_rpa_expired(hdev, true);

    clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

    /* Encryption failed on an established link: mark key-missing as an
     * auth failure for mgmt and tear the connection down.
     */
    if (ev->status && conn->state == BT_CONNECTED) {
        if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
            set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

        hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
        hci_conn_drop(conn);

    /* In Secure Connections Only mode, do not allow any connections
     * that are not encrypted with AES-CCM using a P-256 authenticated
     */
    if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
        (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
         conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
        hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
        hci_conn_drop(conn);

    /* Try reading the encryption key size for encrypted ACL links */
    if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
        struct hci_cp_read_enc_key_size cp;
        struct hci_request req;

        /* Only send HCI_Read_Encryption_Key_Size if the
         * controller really supports it. If it doesn't, assume
         * the default size (16).
         */
        if (!(hdev->commands[20] & 0x10)) {
            conn->enc_key_size = HCI_LINK_KEY_SIZE;

        hci_req_init(&req, hdev);

        cp.handle = cpu_to_le16(conn->handle);
        hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

        /* hci_req_run_skb() returns non-zero when the request could
         * not be queued; fall back to the default key size.
         */
        if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
            bt_dev_err(hdev, "sending read key size failed");
            conn->enc_key_size = HCI_LINK_KEY_SIZE;

    /* Set the default Authenticated Payload Timeout after
     * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
     * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
     * sent when the link is active and Encryption is enabled, the conn
     * type can be either LE or ACL and controller must support LMP Ping.
     * Ensure for AES-CCM encryption as well.
     */
    if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
        test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
        ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
         (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
        struct hci_cp_write_auth_payload_to cp;

        cp.handle = cpu_to_le16(conn->handle);
        cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
        /* final size/&cp arguments of hci_send_cmd elided in listing */
        hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,

    /* Transition BT_CONFIG connections to connected; otherwise forward
     * the encryption change to interested layers.
     */
    if (conn->state == BT_CONFIG) {
        conn->state = BT_CONNECTED;
        hci_connect_cfm(conn, ev->status);
        hci_conn_drop(conn);
    hci_encrypt_cfm(conn, ev->status, ev->encrypt);

    hci_dev_unlock(hdev);
/* HCI Change Connection Link Key Complete event handler.
 *
 * Marks the connection secure, clears the pending-auth flag and notifies
 * the key-change confirmation hook with the event status.
 */
static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
                                             struct sk_buff *skb)
    struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
    struct hci_conn *conn;

    BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
    /* NOTE(review): the status check guarding this set_bit is elided */
    set_bit(HCI_CONN_SECURE, &conn->flags);

    clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

    hci_key_change_cfm(conn, ev->status);

    hci_dev_unlock(hdev);
/* HCI Read Remote Supported Features Complete event handler.
 *
 * Stores page 0 of the remote feature mask, then either chains a remote
 * extended features read, requests the remote name, or completes the
 * connection setup.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
                                    struct sk_buff *skb)
    struct hci_ev_remote_features *ev = (void *) skb->data;
    struct hci_conn *conn;

    BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
    /* Cache page 0 of the remote features on the connection */
    memcpy(conn->features[0], ev->features, 8);

    if (conn->state != BT_CONFIG)

    /* Both sides support extended features: read page 1 before
     * finishing configuration.
     */
    if (!ev->status && lmp_ext_feat_capable(hdev) &&
        lmp_ext_feat_capable(conn)) {
        struct hci_cp_read_remote_ext_features cp;
        cp.handle = ev->handle;
        /* size/&cp arguments elided in this listing */
        hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,

    /* Not yet announced to mgmt: fetch the remote name first */
    if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
        struct hci_cp_remote_name_req cp;
        memset(&cp, 0, sizeof(cp));
        bacpy(&cp.bdaddr, &conn->dst);
        cp.pscan_rep_mode = 0x02;
        hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
    } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
        mgmt_device_connected(hdev, conn, 0, NULL, 0);

    /* No outgoing authentication required: setup is complete */
    if (!hci_outgoing_auth_needed(hdev, conn)) {
        conn->state = BT_CONNECTED;
        hci_connect_cfm(conn, ev->status);
        hci_conn_drop(conn);

    hci_dev_unlock(hdev);
/* HCI Command Complete event dispatcher.
 *
 * Extracts the completed opcode and status, dispatches the payload to the
 * matching hci_cc_* handler, replenishes the command credit counter and
 * kicks the command work queue if more commands are pending.
 *
 * @opcode/@status: out-parameters reported back to the caller.
 * @req_complete/@req_complete_skb: filled in by hci_req_cmd_complete() so
 * the caller can run the request-completion callback.
 *
 * NOTE(review): the per-case "break" statements and the default case are
 * elided in this listing.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
                                 u16 *opcode, u8 *status,
                                 hci_req_complete_t *req_complete,
                                 hci_req_complete_skb_t *req_complete_skb)
    struct hci_ev_cmd_complete *ev = (void *) skb->data;

    *opcode = __le16_to_cpu(ev->opcode);
    /* Status byte is the first byte of the command-specific payload */
    *status = skb->data[sizeof(*ev)];

    skb_pull(skb, sizeof(*ev));

    /* switch (*opcode) — opening line elided in listing */
    case HCI_OP_INQUIRY_CANCEL:
        hci_cc_inquiry_cancel(hdev, skb);

    case HCI_OP_PERIODIC_INQ:
        hci_cc_periodic_inq(hdev, skb);

    case HCI_OP_EXIT_PERIODIC_INQ:
        hci_cc_exit_periodic_inq(hdev, skb);

    case HCI_OP_REMOTE_NAME_REQ_CANCEL:
        hci_cc_remote_name_req_cancel(hdev, skb);

    case HCI_OP_ROLE_DISCOVERY:
        hci_cc_role_discovery(hdev, skb);

    case HCI_OP_READ_LINK_POLICY:
        hci_cc_read_link_policy(hdev, skb);

    case HCI_OP_WRITE_LINK_POLICY:
        hci_cc_write_link_policy(hdev, skb);

    case HCI_OP_READ_DEF_LINK_POLICY:
        hci_cc_read_def_link_policy(hdev, skb);

    case HCI_OP_WRITE_DEF_LINK_POLICY:
        hci_cc_write_def_link_policy(hdev, skb);

    /* case HCI_OP_RESET label elided in listing */
        hci_cc_reset(hdev, skb);

    case HCI_OP_READ_STORED_LINK_KEY:
        hci_cc_read_stored_link_key(hdev, skb);

    case HCI_OP_DELETE_STORED_LINK_KEY:
        hci_cc_delete_stored_link_key(hdev, skb);

    case HCI_OP_WRITE_LOCAL_NAME:
        hci_cc_write_local_name(hdev, skb);

    case HCI_OP_READ_LOCAL_NAME:
        hci_cc_read_local_name(hdev, skb);

    case HCI_OP_WRITE_AUTH_ENABLE:
        hci_cc_write_auth_enable(hdev, skb);

    case HCI_OP_WRITE_ENCRYPT_MODE:
        hci_cc_write_encrypt_mode(hdev, skb);

    case HCI_OP_WRITE_SCAN_ENABLE:
        hci_cc_write_scan_enable(hdev, skb);

    case HCI_OP_READ_CLASS_OF_DEV:
        hci_cc_read_class_of_dev(hdev, skb);

    case HCI_OP_WRITE_CLASS_OF_DEV:
        hci_cc_write_class_of_dev(hdev, skb);

    case HCI_OP_READ_VOICE_SETTING:
        hci_cc_read_voice_setting(hdev, skb);

    case HCI_OP_WRITE_VOICE_SETTING:
        hci_cc_write_voice_setting(hdev, skb);

    case HCI_OP_READ_NUM_SUPPORTED_IAC:
        hci_cc_read_num_supported_iac(hdev, skb);

    case HCI_OP_WRITE_SSP_MODE:
        hci_cc_write_ssp_mode(hdev, skb);

    case HCI_OP_WRITE_SC_SUPPORT:
        hci_cc_write_sc_support(hdev, skb);

    case HCI_OP_READ_AUTH_PAYLOAD_TO:
        hci_cc_read_auth_payload_timeout(hdev, skb);

    case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
        hci_cc_write_auth_payload_timeout(hdev, skb);

    case HCI_OP_READ_LOCAL_VERSION:
        hci_cc_read_local_version(hdev, skb);

    case HCI_OP_READ_LOCAL_COMMANDS:
        hci_cc_read_local_commands(hdev, skb);

    case HCI_OP_READ_LOCAL_FEATURES:
        hci_cc_read_local_features(hdev, skb);

    case HCI_OP_READ_LOCAL_EXT_FEATURES:
        hci_cc_read_local_ext_features(hdev, skb);

    case HCI_OP_READ_BUFFER_SIZE:
        hci_cc_read_buffer_size(hdev, skb);

    case HCI_OP_READ_BD_ADDR:
        hci_cc_read_bd_addr(hdev, skb);

    case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
        hci_cc_read_page_scan_activity(hdev, skb);

    case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
        hci_cc_write_page_scan_activity(hdev, skb);

    case HCI_OP_READ_PAGE_SCAN_TYPE:
        hci_cc_read_page_scan_type(hdev, skb);

    case HCI_OP_WRITE_PAGE_SCAN_TYPE:
        hci_cc_write_page_scan_type(hdev, skb);

    case HCI_OP_READ_DATA_BLOCK_SIZE:
        hci_cc_read_data_block_size(hdev, skb);

    case HCI_OP_READ_FLOW_CONTROL_MODE:
        hci_cc_read_flow_control_mode(hdev, skb);

    case HCI_OP_READ_LOCAL_AMP_INFO:
        hci_cc_read_local_amp_info(hdev, skb);

    case HCI_OP_READ_CLOCK:
        hci_cc_read_clock(hdev, skb);

    case HCI_OP_READ_INQ_RSP_TX_POWER:
        hci_cc_read_inq_rsp_tx_power(hdev, skb);

    case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
        hci_cc_read_def_err_data_reporting(hdev, skb);

    case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
        hci_cc_write_def_err_data_reporting(hdev, skb);

    case HCI_OP_PIN_CODE_REPLY:
        hci_cc_pin_code_reply(hdev, skb);

    case HCI_OP_PIN_CODE_NEG_REPLY:
        hci_cc_pin_code_neg_reply(hdev, skb);

    case HCI_OP_READ_LOCAL_OOB_DATA:
        hci_cc_read_local_oob_data(hdev, skb);

    case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
        hci_cc_read_local_oob_ext_data(hdev, skb);

    case HCI_OP_LE_READ_BUFFER_SIZE:
        hci_cc_le_read_buffer_size(hdev, skb);

    case HCI_OP_LE_READ_LOCAL_FEATURES:
        hci_cc_le_read_local_features(hdev, skb);

    case HCI_OP_LE_READ_ADV_TX_POWER:
        hci_cc_le_read_adv_tx_power(hdev, skb);

    case HCI_OP_USER_CONFIRM_REPLY:
        hci_cc_user_confirm_reply(hdev, skb);

    case HCI_OP_USER_CONFIRM_NEG_REPLY:
        hci_cc_user_confirm_neg_reply(hdev, skb);

    case HCI_OP_USER_PASSKEY_REPLY:
        hci_cc_user_passkey_reply(hdev, skb);

    case HCI_OP_USER_PASSKEY_NEG_REPLY:
        hci_cc_user_passkey_neg_reply(hdev, skb);

    case HCI_OP_LE_SET_RANDOM_ADDR:
        hci_cc_le_set_random_addr(hdev, skb);

    case HCI_OP_LE_SET_ADV_ENABLE:
        hci_cc_le_set_adv_enable(hdev, skb);

    case HCI_OP_LE_SET_SCAN_PARAM:
        hci_cc_le_set_scan_param(hdev, skb);

    case HCI_OP_LE_SET_SCAN_ENABLE:
        hci_cc_le_set_scan_enable(hdev, skb);

    case HCI_OP_LE_READ_WHITE_LIST_SIZE:
        hci_cc_le_read_white_list_size(hdev, skb);

    case HCI_OP_LE_CLEAR_WHITE_LIST:
        hci_cc_le_clear_white_list(hdev, skb);

    case HCI_OP_LE_ADD_TO_WHITE_LIST:
        hci_cc_le_add_to_white_list(hdev, skb);

    case HCI_OP_LE_DEL_FROM_WHITE_LIST:
        hci_cc_le_del_from_white_list(hdev, skb);

    case HCI_OP_LE_READ_SUPPORTED_STATES:
        hci_cc_le_read_supported_states(hdev, skb);

    case HCI_OP_LE_READ_DEF_DATA_LEN:
        hci_cc_le_read_def_data_len(hdev, skb);

    case HCI_OP_LE_WRITE_DEF_DATA_LEN:
        hci_cc_le_write_def_data_len(hdev, skb);

    case HCI_OP_LE_ADD_TO_RESOLV_LIST:
        hci_cc_le_add_to_resolv_list(hdev, skb);

    case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
        hci_cc_le_del_from_resolv_list(hdev, skb);

    case HCI_OP_LE_CLEAR_RESOLV_LIST:
        hci_cc_le_clear_resolv_list(hdev, skb);

    case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
        hci_cc_le_read_resolv_list_size(hdev, skb);

    case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
        hci_cc_le_set_addr_resolution_enable(hdev, skb);

    case HCI_OP_LE_READ_MAX_DATA_LEN:
        hci_cc_le_read_max_data_len(hdev, skb);

    case HCI_OP_WRITE_LE_HOST_SUPPORTED:
        hci_cc_write_le_host_supported(hdev, skb);

    case HCI_OP_LE_SET_ADV_PARAM:
        hci_cc_set_adv_param(hdev, skb);

    case HCI_OP_READ_RSSI:
        hci_cc_read_rssi(hdev, skb);

    case HCI_OP_READ_TX_POWER:
        hci_cc_read_tx_power(hdev, skb);

    case HCI_OP_WRITE_SSP_DEBUG_MODE:
        hci_cc_write_ssp_debug_mode(hdev, skb);

    case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
        hci_cc_le_set_ext_scan_param(hdev, skb);

    case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
        hci_cc_le_set_ext_scan_enable(hdev, skb);

    case HCI_OP_LE_SET_DEFAULT_PHY:
        hci_cc_le_set_default_phy(hdev, skb);

    case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
        hci_cc_le_read_num_adv_sets(hdev, skb);

    case HCI_OP_LE_SET_EXT_ADV_PARAMS:
        hci_cc_set_ext_adv_param(hdev, skb);

    case HCI_OP_LE_SET_EXT_ADV_ENABLE:
        hci_cc_le_set_ext_adv_enable(hdev, skb);

    case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
        hci_cc_le_set_adv_set_random_addr(hdev, skb);

    /* default case label elided: unknown opcode, log only */
        BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);

    /* Any real command completion cancels the command timeout */
    if (*opcode != HCI_OP_NOP)
        cancel_delayed_work(&hdev->cmd_timer);

    /* Controller grants a new command credit (except during reset) */
    if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
        atomic_set(&hdev->cmd_cnt, 1);

    /* trailing req_complete_skb argument elided in listing */
    hci_req_cmd_complete(hdev, *opcode, *status, req_complete,

    if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
        /* leading bt_dev_err(hdev, ...) line elided in listing */
        "unexpected event for opcode 0x%4.4x", *opcode);

    /* More queued commands and available credit: keep the pipe moving */
    if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
        queue_work(hdev->workqueue, &hdev->cmd_work);
/* HCI Command Status event dispatcher.
 *
 * Mirrors hci_cmd_complete_evt() but for commands that report only a
 * status (the result arrives later as a dedicated event). Dispatches to
 * the matching hci_cs_* handler and manages command credits.
 *
 * NOTE(review): per-case "break" statements and the default case are
 * elided in this listing.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
                               u16 *opcode, u8 *status,
                               hci_req_complete_t *req_complete,
                               hci_req_complete_skb_t *req_complete_skb)
    struct hci_ev_cmd_status *ev = (void *) skb->data;

    skb_pull(skb, sizeof(*ev));

    *opcode = __le16_to_cpu(ev->opcode);
    *status = ev->status;

    /* switch (*opcode) — opening line elided in listing */
    case HCI_OP_INQUIRY:
        hci_cs_inquiry(hdev, ev->status);

    case HCI_OP_CREATE_CONN:
        hci_cs_create_conn(hdev, ev->status);

    case HCI_OP_DISCONNECT:
        hci_cs_disconnect(hdev, ev->status);

    case HCI_OP_ADD_SCO:
        hci_cs_add_sco(hdev, ev->status);

    case HCI_OP_AUTH_REQUESTED:
        hci_cs_auth_requested(hdev, ev->status);

    case HCI_OP_SET_CONN_ENCRYPT:
        hci_cs_set_conn_encrypt(hdev, ev->status);

    case HCI_OP_REMOTE_NAME_REQ:
        hci_cs_remote_name_req(hdev, ev->status);

    case HCI_OP_READ_REMOTE_FEATURES:
        hci_cs_read_remote_features(hdev, ev->status);

    case HCI_OP_READ_REMOTE_EXT_FEATURES:
        hci_cs_read_remote_ext_features(hdev, ev->status);

    case HCI_OP_SETUP_SYNC_CONN:
        hci_cs_setup_sync_conn(hdev, ev->status);

    case HCI_OP_SNIFF_MODE:
        hci_cs_sniff_mode(hdev, ev->status);

    case HCI_OP_EXIT_SNIFF_MODE:
        hci_cs_exit_sniff_mode(hdev, ev->status);

    case HCI_OP_SWITCH_ROLE:
        hci_cs_switch_role(hdev, ev->status);

    case HCI_OP_LE_CREATE_CONN:
        hci_cs_le_create_conn(hdev, ev->status);

    case HCI_OP_LE_READ_REMOTE_FEATURES:
        hci_cs_le_read_remote_features(hdev, ev->status);

    case HCI_OP_LE_START_ENC:
        hci_cs_le_start_enc(hdev, ev->status);

    case HCI_OP_LE_EXT_CREATE_CONN:
        hci_cs_le_ext_create_conn(hdev, ev->status);

    /* default case label elided: unknown opcode, log only */
        BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);

    /* Any real command status cancels the command timeout */
    if (*opcode != HCI_OP_NOP)
        cancel_delayed_work(&hdev->cmd_timer);

    /* Controller grants a new command credit (except during reset) */
    if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
        atomic_set(&hdev->cmd_cnt, 1);

    /* Indicate request completion if the command failed. Also, if
     * we're not waiting for a special event and we get a success
     * command status we should try to flag the request as completed
     * (since for this kind of commands there will not be a command
     */
    /* NOTE(review): the first half of this condition is elided */
    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
        hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,

    if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
        /* leading bt_dev_err(hdev, ...) line elided in listing */
        "unexpected event for opcode 0x%4.4x", *opcode);

    /* Kick the command work queue if more commands are waiting */
    if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
        queue_work(hdev->workqueue, &hdev->cmd_work);
/* HCI Hardware Error event handler.
 *
 * Records the controller's error code and schedules the error-reset work
 * to recover the device. No locking needed: only hdev fields are touched.
 */
static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
    struct hci_ev_hardware_error *ev = (void *) skb->data;

    hdev->hw_error_code = ev->code;

    queue_work(hdev->req_workqueue, &hdev->error_reset);
/* HCI Role Change event handler.
 *
 * Updates the connection's master/slave role on success, clears the
 * pending role-switch flag and notifies the role-switch confirmation hook.
 */
static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
    struct hci_ev_role_change *ev = (void *) skb->data;
    struct hci_conn *conn;

    BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

    conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
    /* NOTE(review): status check guarding this assignment is elided */
    conn->role = ev->role;

    clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

    hci_role_switch_cfm(conn, ev->status, ev->role);

    hci_dev_unlock(hdev);
/* HCI Number Of Completed Packets event handler.
 *
 * Returns transmit credits to the per-type counters (ACL/LE/SCO) for each
 * reported connection handle, clamped to the controller's advertised
 * buffer counts, then kicks the TX work queue.
 *
 * NOTE(review): case labels (ACL_LINK/LE_LINK/SCO_LINK), breaks and some
 * braces are elided in this listing.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
    struct hci_ev_num_comp_pkts *ev = (void *) skb->data;

    /* This event is only valid in packet-based flow control mode */
    if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
        bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);

    /* Validate that the declared handle count fits the skb */
    if (skb->len < sizeof(*ev) ||
        skb->len < struct_size(ev, handles, ev->num_hndl)) {
        BT_DBG("%s bad parameters", hdev->name);

    BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

    for (i = 0; i < ev->num_hndl; i++) {
        struct hci_comp_pkts_info *info = &ev->handles[i];
        struct hci_conn *conn;
        __u16 handle, count;

        handle = __le16_to_cpu(info->handle);
        count = __le16_to_cpu(info->count);

        conn = hci_conn_hash_lookup_handle(hdev, handle);
        /* Credit the connection and the per-type device counter */
        conn->sent -= count;

        switch (conn->type) {
        /* ACL_LINK case */
            hdev->acl_cnt += count;
            if (hdev->acl_cnt > hdev->acl_pkts)
                hdev->acl_cnt = hdev->acl_pkts;

        /* LE_LINK case: LE shares ACL buffers when le_pkts is 0 */
            if (hdev->le_pkts) {
                hdev->le_cnt += count;
                if (hdev->le_cnt > hdev->le_pkts)
                    hdev->le_cnt = hdev->le_pkts;
            hdev->acl_cnt += count;
            if (hdev->acl_cnt > hdev->acl_pkts)
                hdev->acl_cnt = hdev->acl_pkts;

        /* SCO_LINK case */
            hdev->sco_cnt += count;
            if (hdev->sco_cnt > hdev->sco_pkts)
                hdev->sco_cnt = hdev->sco_pkts;

            bt_dev_err(hdev, "unknown type %d conn %p",

    queue_work(hdev->workqueue, &hdev->tx_work);
/* Look up the object carrying a given handle according to the device
 * type: a connection for primary controllers, a channel's connection for
 * AMP controllers.
 *
 * NOTE(review): the second signature line (the handle parameter), case
 * labels and return statements are elided in this listing.
 */
static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
    struct hci_chan *chan;

    switch (hdev->dev_type) {
    /* HCI_PRIMARY case */
        return hci_conn_hash_lookup_handle(hdev, handle);
    /* HCI_AMP case: resolve via the logical channel */
        chan = hci_chan_lookup_handle(hdev, handle);
    /* default case */
        bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
/* HCI Number Of Completed Data Blocks event handler (block-based flow
 * control, used by AMP controllers).
 *
 * Returns data-block credits for each reported handle, clamped to the
 * controller's total block count, then kicks the TX work queue.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
    struct hci_ev_num_comp_blocks *ev = (void *) skb->data;

    /* This event is only valid in block-based flow control mode */
    if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
        bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);

    /* Validate that the declared handle count fits the skb */
    if (skb->len < sizeof(*ev) ||
        skb->len < struct_size(ev, handles, ev->num_hndl)) {
        BT_DBG("%s bad parameters", hdev->name);

    BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,

    for (i = 0; i < ev->num_hndl; i++) {
        struct hci_comp_blocks_info *info = &ev->handles[i];
        struct hci_conn *conn = NULL;
        __u16 handle, block_count;

        handle = __le16_to_cpu(info->handle);
        block_count = __le16_to_cpu(info->blocks);

        conn = __hci_conn_lookup_handle(hdev, handle);
        conn->sent -= block_count;

        switch (conn->type) {
        /* ACL_LINK / AMP_LINK cases (labels elided in listing) */
            hdev->block_cnt += block_count;
            if (hdev->block_cnt > hdev->num_blocks)
                hdev->block_cnt = hdev->num_blocks;

            bt_dev_err(hdev, "unknown type %d conn %p",

    queue_work(hdev->workqueue, &hdev->tx_work);
/* HCI Mode Change event handler (active/sniff).
 *
 * Records the new link mode, maintains the power-save flag when the
 * change was controller-initiated, and resumes deferred SCO setup.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
    struct hci_ev_mode_change *ev = (void *) skb->data;
    struct hci_conn *conn;

    BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
    conn->mode = ev->mode;

    /* Only track power-save state for unsolicited mode changes */
    if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
        if (conn->mode == HCI_CM_ACTIVE)
            set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
        clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);

    /* A SCO setup may have been deferred until the ACL left sniff mode */
    if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
        hci_sco_setup(conn, ev->status);

    hci_dev_unlock(hdev);
/* HCI PIN Code Request event handler.
 *
 * Extends the disconnect timeout for pairing, rejects the request when
 * the device is not bondable and we did not initiate authentication, and
 * otherwise forwards the request to user space via mgmt.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
    struct hci_ev_pin_code_req *ev = (void *) skb->data;
    struct hci_conn *conn;

    BT_DBG("%s", hdev->name);

    conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
    /* Refresh the disconnect timer to the longer pairing timeout */
    if (conn->state == BT_CONNECTED) {
        hci_conn_hold(conn);
        conn->disc_timeout = HCI_PAIRING_TIMEOUT;
        hci_conn_drop(conn);

    /* Not bondable and not the auth initiator: reject the PIN request */
    if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
        !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
        hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
                     sizeof(ev->bdaddr), &ev->bdaddr);
    } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
        /* NOTE(review): "secure" local and its assignments are elided;
         * high pending security appears to request a 16-digit PIN.
         */
        if (conn->pending_sec_level == BT_SECURITY_HIGH)

        mgmt_pin_code_request(hdev, &ev->bdaddr, secure);

    hci_dev_unlock(hdev);
/* Record a link key's type and PIN length on the connection and derive
 * the pending security level from the key type.
 *
 * NOTE(review): the body of the HCI_LK_CHANGED_COMBINATION branch, the
 * switch statement line, breaks and closing braces are elided here.
 */
static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
    if (key_type == HCI_LK_CHANGED_COMBINATION)

    conn->pin_length = pin_len;
    conn->key_type = key_type;

    /* switch (key_type) — map key type to pending security level */
    case HCI_LK_LOCAL_UNIT:
    case HCI_LK_REMOTE_UNIT:
    case HCI_LK_DEBUG_COMBINATION:

    case HCI_LK_COMBINATION:
        /* Long (16-digit) PINs qualify for high security; the pin_len
         * condition is elided in this listing.
         */
        conn->pending_sec_level = BT_SECURITY_HIGH;
        conn->pending_sec_level = BT_SECURITY_MEDIUM;

    case HCI_LK_UNAUTH_COMBINATION_P192:
    case HCI_LK_UNAUTH_COMBINATION_P256:
        conn->pending_sec_level = BT_SECURITY_MEDIUM;

    case HCI_LK_AUTH_COMBINATION_P192:
        conn->pending_sec_level = BT_SECURITY_HIGH;

    case HCI_LK_AUTH_COMBINATION_P256:
        conn->pending_sec_level = BT_SECURITY_FIPS;
/* HCI Link Key Request event handler.
 *
 * Looks up a stored link key for the peer and replies with it, unless
 * the key's strength does not satisfy the connection's (pending)
 * security requirements — in that case a negative reply is sent so
 * pairing is redone.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
    struct hci_ev_link_key_req *ev = (void *) skb->data;
    struct hci_cp_link_key_reply cp;
    struct hci_conn *conn;
    struct link_key *key;

    BT_DBG("%s", hdev->name);

    /* Keys are only managed by the kernel when mgmt is in use */
    if (!hci_dev_test_flag(hdev, HCI_MGMT))

    key = hci_find_link_key(hdev, &ev->bdaddr);
    /* No stored key: fall through to the negative reply below */
    BT_DBG("%s link key not found for %pMR", hdev->name,

    BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,

    conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
    clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

    /* Reject unauthenticated keys when MITM protection is required */
    if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
         key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
        conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
        BT_DBG("%s ignoring unauthenticated key", hdev->name);

    /* Reject short-PIN combination keys for high/FIPS security */
    if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
        (conn->pending_sec_level == BT_SECURITY_HIGH ||
         conn->pending_sec_level == BT_SECURITY_FIPS)) {
        BT_DBG("%s ignoring key unauthenticated for high security",

    conn_set_key(conn, key->type, key->pin_len);

    bacpy(&cp.bdaddr, &ev->bdaddr);
    memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

    hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

    hci_dev_unlock(hdev);

    /* not_found label elided: no usable key, send negative reply */
    hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
    hci_dev_unlock(hdev);
/* HCI Link Key Notification event handler.
 *
 * Stores the new link key, notifies mgmt about it (with a persistence
 * hint), and discards debug keys unless HCI_KEEP_DEBUG_KEYS is set.
 *
 * NOTE(review): the declarations of "persistent" and "pin_len" are
 * elided in this listing — confirm pin_len's initialization upstream.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
    struct hci_ev_link_key_notify *ev = (void *) skb->data;
    struct hci_conn *conn;
    struct link_key *key;

    BT_DBG("%s", hdev->name);

    conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
    /* Refresh the disconnect timeout now that pairing produced a key */
    hci_conn_hold(conn);
    conn->disc_timeout = HCI_DISCONN_TIMEOUT;
    hci_conn_drop(conn);

    set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
    conn_set_key(conn, ev->key_type, conn->pin_length);

    /* Only persist keys when mgmt is managing the device */
    if (!hci_dev_test_flag(hdev, HCI_MGMT))

    key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
                           ev->key_type, pin_len, &persistent);

    /* Update connection information since adding the key will have
     * fixed up the type in the case of changed combination keys.
     */
    if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
        conn_set_key(conn, key->type, key->pin_len);

    mgmt_new_link_key(hdev, key, persistent);

    /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
     * is set. If it's not set simply remove the key from the kernel
     * list (we've still notified user space about it but with
     * store_hint being 0).
     */
    if (key->type == HCI_LK_DEBUG_COMBINATION &&
        !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
        list_del_rcu(&key->list);
        kfree_rcu(key, rcu);

    /* Flush-key flag tracks whether the key should survive disconnect;
     * the persistent check guarding these lines is elided.
     */
    clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
    set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

    hci_dev_unlock(hdev);
/* HCI Read Clock Offset Complete event handler.
 *
 * Caches the peer's clock offset in the inquiry cache entry so later
 * connection attempts can page faster.
 */
static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
    struct hci_ev_clock_offset *ev = (void *) skb->data;
    struct hci_conn *conn;

    BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
    if (conn && !ev->status) {
        struct inquiry_entry *ie;

        ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
        /* NOTE(review): the "if (ie)" guard line is elided here */
        ie->data.clock_offset = ev->clock_offset;
        ie->timestamp = jiffies;

    hci_dev_unlock(hdev);
/* HCI Connection Packet Type Changed event handler.
 *
 * Records the new packet-type bitmask on the connection on success.
 */
static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
    struct hci_ev_pkt_type_change *ev = (void *) skb->data;
    struct hci_conn *conn;

    BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
    if (conn && !ev->status)
        conn->pkt_type = __le16_to_cpu(ev->pkt_type);

    hci_dev_unlock(hdev);
/* HCI Page Scan Repetition Mode Change event handler.
 *
 * Updates the cached page-scan repetition mode of the peer in the
 * inquiry cache so paging can use the right parameters.
 */
static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
    struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
    struct inquiry_entry *ie;

    BT_DBG("%s", hdev->name);

    ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
    /* NOTE(review): the "if (ie)" guard line is elided here */
    ie->data.pscan_rep_mode = ev->pscan_rep_mode;
    ie->timestamp = jiffies;

    hci_dev_unlock(hdev);
/* HCI Inquiry Result With RSSI event handler.
 *
 * The event comes in two wire formats (with and without pscan_mode);
 * the per-response size disambiguates them. Each response updates the
 * inquiry cache and is reported to mgmt as a found device.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
                                             struct sk_buff *skb)
    struct inquiry_data data;
    int num_rsp = *((__u8 *) skb->data);

    BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

    /* Periodic inquiry results are not reported */
    if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))

    /* Variant carrying an extra pscan_mode byte per response */
    if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
        struct inquiry_info_with_rssi_and_pscan_mode *info;
        info = (void *) (skb->data + 1);

        for (; num_rsp; num_rsp--, info++) {
            /* u32 flags declaration elided in listing */

            bacpy(&data.bdaddr, &info->bdaddr);
            data.pscan_rep_mode = info->pscan_rep_mode;
            data.pscan_period_mode = info->pscan_period_mode;
            data.pscan_mode = info->pscan_mode;
            memcpy(data.dev_class, info->dev_class, 3);
            data.clock_offset = info->clock_offset;
            data.rssi = info->rssi;
            data.ssp_mode = 0x00;

            flags = hci_inquiry_cache_update(hdev, &data, false);

            mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                              info->dev_class, info->rssi,
                              flags, NULL, 0, NULL, 0);
    /* Standard variant: no pscan_mode field */
        struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

        for (; num_rsp; num_rsp--, info++) {
            /* u32 flags declaration elided in listing */

            bacpy(&data.bdaddr, &info->bdaddr);
            data.pscan_rep_mode = info->pscan_rep_mode;
            data.pscan_period_mode = info->pscan_period_mode;
            data.pscan_mode = 0x00;
            memcpy(data.dev_class, info->dev_class, 3);
            data.clock_offset = info->clock_offset;
            data.rssi = info->rssi;
            data.ssp_mode = 0x00;

            flags = hci_inquiry_cache_update(hdev, &data, false);

            mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                              info->dev_class, info->rssi,
                              flags, NULL, 0, NULL, 0);

    hci_dev_unlock(hdev);
/* HCI Read Remote Extended Features Complete event handler.
 *
 * Caches the requested feature page and, for page 1, synchronizes the
 * SSP/SC enabled flags with the remote host features. Then finishes the
 * connection configuration like hci_remote_features_evt().
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
                                        struct sk_buff *skb)
    struct hci_ev_remote_ext_features *ev = (void *) skb->data;
    struct hci_conn *conn;

    BT_DBG("%s", hdev->name);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
    /* Store the feature page, bounds-checked against HCI_MAX_PAGES */
    if (ev->page < HCI_MAX_PAGES)
        memcpy(conn->features[ev->page], ev->features, 8);

    if (!ev->status && ev->page == 0x01) {
        struct inquiry_entry *ie;

        ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
        /* NOTE(review): the "if (ie)" guard line is elided here */
        ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

        if (ev->features[0] & LMP_HOST_SSP) {
            set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);

            /* It is mandatory by the Bluetooth specification that
             * Extended Inquiry Results are only used when Secure
             * Simple Pairing is enabled, but some devices violate
             *
             * To make these devices work, the internal SSP
             * enabled flag needs to be cleared if the remote host
             * features do not indicate SSP support */
            clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);

        if (ev->features[0] & LMP_HOST_SC)
            set_bit(HCI_CONN_SC_ENABLED, &conn->flags);

    if (conn->state != BT_CONFIG)

    /* Not yet announced to mgmt: fetch the remote name first */
    if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
        struct hci_cp_remote_name_req cp;
        memset(&cp, 0, sizeof(cp));
        bacpy(&cp.bdaddr, &conn->dst);
        cp.pscan_rep_mode = 0x02;
        hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
    } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
        mgmt_device_connected(hdev, conn, 0, NULL, 0);

    /* No outgoing authentication required: setup is complete */
    if (!hci_outgoing_auth_needed(hdev, conn)) {
        conn->state = BT_CONNECTED;
        hci_connect_cfm(conn, ev->status);
        hci_conn_drop(conn);

    hci_dev_unlock(hdev);
/* HCI Synchronous Connection Complete event handler (SCO/eSCO).
 *
 * Finalizes a synchronous connection on success; on a set of known
 * eSCO-rejection errors it retries setup with a downgraded packet type
 * before giving up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
                                       struct sk_buff *skb)
    struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
    struct hci_conn *conn;

    BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

    conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
    /* NOTE(review): guard around the ESCO fallback lookup is elided */
    if (ev->link_type == ESCO_LINK)

    /* When the link type in the event indicates SCO connection
     * and lookup of the connection object fails, then check
     * if an eSCO connection object exists.
     *
     * The core limits the synchronous connections to either
     * SCO or eSCO. The eSCO connection is preferred and tried
     * to be setup first and until successfully established,
     * the link type will be hinted as eSCO.
     */
    conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);

    switch (ev->status) {
    /* case 0x00 (success) label elided */
        conn->handle = __le16_to_cpu(ev->handle);
        conn->state = BT_CONNECTED;
        conn->type = ev->link_type;

        hci_debugfs_create_conn(conn);
        hci_conn_add_sysfs(conn);

    case 0x10: /* Connection Accept Timeout */
    case 0x0d: /* Connection Rejected due to Limited Resources */
    case 0x11: /* Unsupported Feature or Parameter Value */
    case 0x1c: /* SCO interval rejected */
    case 0x1a: /* Unsupported Remote Feature */
    case 0x1f: /* Unspecified error */
    case 0x20: /* Unsupported LMP Parameter value */
        /* Downgrade the packet type and retry the sync setup; the
         * attempt-count guard around this is elided in the listing.
         */
        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
                         (hdev->esco_type & EDR_ESCO_MASK);
        if (hci_setup_sync(conn, conn->link->handle))

    /* default case: any other failure closes the connection */
        conn->state = BT_CLOSED;

    hci_connect_cfm(conn, ev->status);

    hci_dev_unlock(hdev);
/* Compute the used length of an EIR (Extended Inquiry Response) buffer
 * by walking its length-prefixed fields.
 *
 * NOTE(review): the "parsed" declaration, the zero-length-field
 * termination check and the return statement are elided in this listing.
 */
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
    while (parsed < eir_len) {
        /* Each field is one length byte followed by field_len bytes */
        u8 field_len = eir[0];

        parsed += field_len + 1;
        eir += field_len + 1;
/* HCI Extended Inquiry Result event handler.
 *
 * Updates the inquiry cache for each response and reports the device to
 * mgmt, including its EIR data (so a complete name, if present, can be
 * extracted without a separate name request).
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
                                            struct sk_buff *skb)
    struct inquiry_data data;
    struct extended_inquiry_info *info = (void *) (skb->data + 1);
    int num_rsp = *((__u8 *) skb->data);

    BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

    /* Periodic inquiry results are not reported */
    if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))

    for (; num_rsp; num_rsp--, info++) {
        /* declarations of name_known/flags/eir_len elided in listing */

        bacpy(&data.bdaddr, &info->bdaddr);
        data.pscan_rep_mode = info->pscan_rep_mode;
        data.pscan_period_mode = info->pscan_period_mode;
        data.pscan_mode = 0x00;
        memcpy(data.dev_class, info->dev_class, 3);
        data.clock_offset = info->clock_offset;
        data.rssi = info->rssi;
        /* EIR implies SSP support on the remote */
        data.ssp_mode = 0x01;

        /* Name is "known" if the EIR already carries a complete name */
        if (hci_dev_test_flag(hdev, HCI_MGMT))
            name_known = eir_get_data(info->data,
                                      EIR_NAME_COMPLETE, NULL);

        flags = hci_inquiry_cache_update(hdev, &data, name_known);

        eir_len = eir_get_length(info->data, sizeof(info->data));

        mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                          info->dev_class, info->rssi,
                          flags, info->data, eir_len, NULL, 0);

    hci_dev_unlock(hdev);
/* HCI Encryption Key Refresh Complete event handler.
 *
 * For LE links only (BR/EDR handles this through the auth_complete
 * event): commits the pending security level, tears down the link on
 * failure, and otherwise completes connection setup or notifies auth.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
                                         struct sk_buff *skb)
    struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
    struct hci_conn *conn;

    BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
           __le16_to_cpu(ev->handle));

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));

    /* For BR/EDR the necessary steps are taken through the
     * auth_complete event.
     */
    if (conn->type != LE_LINK)

    /* NOTE(review): the !ev->status guard for this commit is elided */
    conn->sec_level = conn->pending_sec_level;

    clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

    /* Refresh failed on an established link: disconnect */
    if (ev->status && conn->state == BT_CONNECTED) {
        hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
        hci_conn_drop(conn);

    if (conn->state == BT_CONFIG) {
        conn->state = BT_CONNECTED;
        hci_connect_cfm(conn, ev->status);
        hci_conn_drop(conn);
    /* Established link: confirm authentication and refresh the
     * disconnect timeout (else-branch structure elided in listing).
     */
        hci_auth_cfm(conn, ev->status);

        hci_conn_hold(conn);
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;
        hci_conn_drop(conn);

    hci_dev_unlock(hdev);
/* Compute the authentication requirements value to place in an IO
 * Capability Reply. Bit 0 of an auth requirement is the MITM flag;
 * the remote's requirement is merged with our local preference
 * (conn->auth_type), and MITM is only kept when both sides have IO
 * capabilities that can actually support it.
 */
4435 static u8 hci_get_auth_req(struct hci_conn *conn)
4437 /* If remote requests no-bonding follow that lead */
4438 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4439 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4440 return conn->remote_auth | (conn->auth_type & 0x01)
4442 /* If both remote and local have enough IO capabilities, require
4445 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4446 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4447 return conn->remote_auth | 0x01;
4449 /* No MITM protection possible so ignore remote requirement */
4450 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Determine the OOB data present value for an IO Capability Reply
 * based on the remote OOB data stored for this peer and the current
 * Secure Connections / SC-Only settings. When SC-Only is enabled,
 * valid P-256 (hash256/rand256) values are mandatory; otherwise
 * P-192 values are checked.
 */
4453 static u8 bredr_oob_data_present(struct hci_conn *conn)
4455 struct hci_dev *hdev = conn->hdev;
4456 struct oob_data *data;
4458 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4462 if (bredr_sc_enabled(hdev)) {
4463 /* When Secure Connections is enabled, then just
4464 * return the present value stored with the OOB
4465 * data. The stored value contains the right present
4466 * information. However it can only be trusted when
4467 * not in Secure Connection Only mode.
4469 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4470 return data->present;
4472 /* When Secure Connections Only mode is enabled, then
4473 * the P-256 values are required. If they are not
4474 * available, then do not declare that OOB data is
/* All-zero hash/rand means the P-256 values were never provided. */
4477 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4478 !memcmp(data->hash256, ZERO_KEY, 16))
4484 /* When Secure Connections is not enabled or actually
4485 * not supported by the hardware, then check that if
4486 * P-192 data values are present.
4488 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4489 !memcmp(data->hash192, ZERO_KEY, 16))
/* Handle HCI IO Capability Request. If pairing is allowed (bondable,
 * we initiated, or remote asks for no-bonding) reply with our IO
 * capability, authentication requirements and OOB-present flag;
 * otherwise send a negative reply with "pairing not allowed".
 */
4495 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4497 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4498 struct hci_conn *conn;
4500 BT_DBG("%s", hdev->name);
4504 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Hold the connection for the duration of the pairing procedure. */
4508 hci_conn_hold(conn);
4510 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4513 /* Allow pairing if we're pairable, the initiators of the
4514 * pairing or if the remote is not requesting bonding.
4516 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4517 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4518 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4519 struct hci_cp_io_capability_reply cp;
4521 bacpy(&cp.bdaddr, &ev->bdaddr);
4522 /* Change the IO capability from KeyboardDisplay
4523 * to DisplayYesNo as it is not supported by BT spec. */
4524 cp.capability = (conn->io_capability == 0x04) ?
4525 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4527 /* If we are initiators, there is no remote information yet */
4528 if (conn->remote_auth == 0xff) {
4529 /* Request MITM protection if our IO caps allow it
4530 * except for the no-bonding case.
4532 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4533 conn->auth_type != HCI_AT_NO_BONDING)
4534 conn->auth_type |= 0x01;
4536 conn->auth_type = hci_get_auth_req(conn);
4539 /* If we're not bondable, force one of the non-bondable
4540 * authentication requirement values.
4542 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4543 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4545 cp.authentication = conn->auth_type;
4546 cp.oob_data = bredr_oob_data_present(conn);
4548 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
/* Pairing not permitted: send a negative IO capability reply. */
4551 struct hci_cp_io_capability_neg_reply cp;
4553 bacpy(&cp.bdaddr, &ev->bdaddr);
4554 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4556 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4561 hci_dev_unlock(hdev);
/* Handle HCI IO Capability Response: cache the remote device's IO
 * capability and authentication requirements on the connection for
 * use by the subsequent pairing steps.
 */
4564 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4566 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4567 struct hci_conn *conn;
4569 BT_DBG("%s", hdev->name);
4573 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4577 conn->remote_cap = ev->capability;
4578 conn->remote_auth = ev->authentication;
4581 hci_dev_unlock(hdev);
/* Handle HCI User Confirmation Request (numeric comparison during SSP).
 * Rejects the request when required MITM protection is impossible,
 * auto-accepts in the just-works cases (optionally after the
 * configured auto-accept delay), and otherwise forwards the request
 * to user space via mgmt, with confirm_hint indicating whether user
 * space should ask for authorization rather than value comparison.
 */
4584 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4585 struct sk_buff *skb)
4587 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4588 int loc_mitm, rem_mitm, confirm_hint = 0;
4589 struct hci_conn *conn;
4591 BT_DBG("%s", hdev->name);
4595 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4598 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM flag for each side. */
4602 loc_mitm = (conn->auth_type & 0x01);
4603 rem_mitm = (conn->remote_auth & 0x01);
4605 /* If we require MITM but the remote device can't provide that
4606 * (it has NoInputNoOutput) then reject the confirmation
4607 * request. We check the security level here since it doesn't
4608 * necessarily match conn->auth_type.
4610 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4611 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4612 BT_DBG("Rejecting request: remote device can't provide MITM");
4613 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4614 sizeof(ev->bdaddr), &ev->bdaddr);
4618 /* If no side requires MITM protection; auto-accept */
4619 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4620 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4622 /* If we're not the initiators request authorization to
4623 * proceed from user space (mgmt_user_confirm with
4624 * confirm_hint set to 1). The exception is if neither
4625 * side had MITM or if the local IO capability is
4626 * NoInputNoOutput, in which case we do auto-accept
4628 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4629 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4630 (loc_mitm || rem_mitm)) {
4631 BT_DBG("Confirming auto-accept as acceptor");
4636 /* If there already exists link key in local host, leave the
4637 * decision to user space since the remote device could be
4638 * legitimate or malicious.
4640 if (hci_find_link_key(hdev, &ev->bdaddr)) {
4641 bt_dev_dbg(hdev, "Local host already has link key");
4646 BT_DBG("Auto-accept of user confirmation with %ums delay",
4647 hdev->auto_accept_delay);
/* With a configured delay, defer the accept via delayed work;
 * otherwise accept immediately.
 */
4649 if (hdev->auto_accept_delay > 0) {
4650 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4651 queue_delayed_work(conn->hdev->workqueue,
4652 &conn->auto_accept_work, delay);
4656 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4657 sizeof(ev->bdaddr), &ev->bdaddr);
4662 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4663 le32_to_cpu(ev->passkey), confirm_hint);
4666 hci_dev_unlock(hdev);
/* Handle HCI User Passkey Request: forward straight to user space via
 * mgmt when the management interface is active.
 */
4669 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4670 struct sk_buff *skb)
4672 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4674 BT_DBG("%s", hdev->name);
4676 if (hci_dev_test_flag(hdev, HCI_MGMT))
4677 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* Handle HCI User Passkey Notification: record the passkey to display
 * on the connection, reset the entered-digit counter, and notify user
 * space via mgmt.
 */
4680 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4681 struct sk_buff *skb)
4683 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4684 struct hci_conn *conn;
4686 BT_DBG("%s", hdev->name);
4688 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4692 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4693 conn->passkey_entered = 0;
4695 if (hci_dev_test_flag(hdev, HCI_MGMT))
4696 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4697 conn->dst_type, conn->passkey_notify,
4698 conn->passkey_entered);
/* Handle HCI Keypress Notification: track how many passkey digits the
 * remote user has entered/erased so far and relay each update to user
 * space via mgmt.
 */
4701 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4703 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4704 struct hci_conn *conn;
4706 BT_DBG("%s", hdev->name);
4708 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4713 case HCI_KEYPRESS_STARTED:
4714 conn->passkey_entered = 0;
4717 case HCI_KEYPRESS_ENTERED:
4718 conn->passkey_entered++;
4721 case HCI_KEYPRESS_ERASED:
4722 conn->passkey_entered--;
4725 case HCI_KEYPRESS_CLEARED:
4726 conn->passkey_entered = 0;
4729 case HCI_KEYPRESS_COMPLETED:
4733 if (hci_dev_test_flag(hdev, HCI_MGMT))
4734 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4735 conn->dst_type, conn->passkey_notify,
4736 conn->passkey_entered);
/* Handle HCI Simple Pairing Complete: reset the cached remote auth
 * requirement and, for pairings we did not initiate, report failures
 * to user space (initiator failures are reported via the
 * Authentication Complete path instead, avoiding duplicates).
 */
4739 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4740 struct sk_buff *skb)
4742 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4743 struct hci_conn *conn;
4745 BT_DBG("%s", hdev->name);
4749 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4753 /* Reset the authentication requirement to unknown */
4754 conn->remote_auth = 0xff;
4756 /* To avoid duplicate auth_failed events to user space we check
4757 * the HCI_CONN_AUTH_PEND flag which will be set if we
4758 * initiated the authentication. A traditional auth_complete
4759 * event gets always produced as initiator and is also mapped to
4760 * the mgmt_auth_failed event */
4761 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4762 mgmt_auth_failed(conn, ev->status);
/* Release the reference taken when pairing started. */
4764 hci_conn_drop(conn);
4767 hci_dev_unlock(hdev);
/* Handle HCI Remote Host Supported Features Notification: store the
 * remote host feature page on the connection and mirror the SSP host
 * support bit into the inquiry cache entry if one exists.
 */
4770 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4771 struct sk_buff *skb)
4773 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4774 struct inquiry_entry *ie;
4775 struct hci_conn *conn;
4777 BT_DBG("%s", hdev->name);
4781 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4783 memcpy(conn->features[1], ev->features, 8);
4785 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4787 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4789 hci_dev_unlock(hdev);
/* Handle HCI Remote OOB Data Request: reply with the locally stored
 * OOB hash/randomizer values for the peer (the extended P-192+P-256
 * variant when BR/EDR Secure Connections is enabled, with P-192
 * zeroed in SC-Only mode), or send a negative reply when no data is
 * stored or mgmt is not active.
 */
4792 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4793 struct sk_buff *skb)
4795 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4796 struct oob_data *data;
4798 BT_DBG("%s", hdev->name);
4802 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4805 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4807 struct hci_cp_remote_oob_data_neg_reply cp;
4809 bacpy(&cp.bdaddr, &ev->bdaddr);
4810 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4815 if (bredr_sc_enabled(hdev)) {
4816 struct hci_cp_remote_oob_ext_data_reply cp;
4818 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-Only mode the P-192 values must not be used, so zero them. */
4819 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4820 memset(cp.hash192, 0, sizeof(cp.hash192));
4821 memset(cp.rand192, 0, sizeof(cp.rand192));
4823 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4824 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4826 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4827 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4829 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4832 struct hci_cp_remote_oob_data_reply cp;
4834 bacpy(&cp.bdaddr, &ev->bdaddr);
4835 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4836 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4838 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4843 hci_dev_unlock(hdev);
4846 #if IS_ENABLED(CONFIG_BT_HS)
/* AMP Channel Selected event (High Speed support): look up the
 * physical-link connection by handle and kick off reading the final
 * local AMP association data.
 */
4847 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4849 struct hci_ev_channel_selected *ev = (void *)skb->data;
4850 struct hci_conn *hcon;
4852 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4854 skb_pull(skb, sizeof(*ev));
4856 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4860 amp_read_loc_assoc_final_data(hdev, hcon);
/* AMP Physical Link Complete: mark the AMP connection established,
 * copy the peer address from the associated BR/EDR connection,
 * register the connection in debugfs/sysfs and confirm the physical
 * link to the AMP manager.
 */
4863 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4864 struct sk_buff *skb)
4866 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4867 struct hci_conn *hcon, *bredr_hcon;
4869 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4874 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4876 hci_dev_unlock(hdev);
4882 hci_dev_unlock(hdev);
/* The AMP link piggybacks on an existing BR/EDR connection that is
 * tracked by the AMP manager.
 */
4886 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4888 hcon->state = BT_CONNECTED;
4889 bacpy(&hcon->dst, &bredr_hcon->dst);
4891 hci_conn_hold(hcon);
4892 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4893 hci_conn_drop(hcon);
4895 hci_debugfs_create_conn(hcon);
4896 hci_conn_add_sysfs(hcon);
4898 amp_physical_cfm(bredr_hcon, hcon);
4900 hci_dev_unlock(hdev);
/* AMP Logical Link Complete: create an hci_chan for the new logical
 * link and, when an L2CAP channel is waiting on the AMP manager,
 * confirm the logical link to L2CAP with the controller's block MTU.
 */
4903 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4905 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4906 struct hci_conn *hcon;
4907 struct hci_chan *hchan;
4908 struct amp_mgr *mgr;
4910 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4911 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4914 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4918 /* Create AMP hchan */
4919 hchan = hci_chan_create(hcon);
4923 hchan->handle = le16_to_cpu(ev->handle);
4925 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4927 mgr = hcon->amp_mgr;
4928 if (mgr && mgr->bredr_chan) {
4929 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4931 l2cap_chan_lock(bredr_chan);
4933 bredr_chan->conn->mtu = hdev->block_mtu;
4934 l2cap_logical_cfm(bredr_chan, hchan, 0);
4935 hci_conn_hold(hcon);
4937 l2cap_chan_unlock(bredr_chan);
/* AMP Disconnection Logical Link Complete: look up the hci_chan by
 * logical-link handle and tear it down via the AMP manager.
 */
4941 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4942 struct sk_buff *skb)
4944 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4945 struct hci_chan *hchan;
4947 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4948 le16_to_cpu(ev->handle), ev->status);
4955 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4959 amp_destroy_logical_link(hchan, ev->reason);
4962 hci_dev_unlock(hdev);
/* AMP Disconnection Physical Link Complete: mark the physical-link
 * connection closed.
 */
4965 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4966 struct sk_buff *skb)
4968 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4969 struct hci_conn *hcon;
4971 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4978 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4980 hcon->state = BT_CLOSED;
4984 hci_dev_unlock(hdev);
/* Common handler for LE Connection Complete and LE Enhanced Connection
 * Complete. Resolves or creates the hci_conn, fills in the initiator/
 * responder addresses and connection parameters, converts an RPA back
 * to the peer's identity address when an IRK is known, notifies mgmt,
 * optionally starts the remote-features exchange, and cleans up any
 * pending connection parameters entry that triggered this connection.
 */
4988 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
4989 bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
4990 u16 interval, u16 latency, u16 supervision_timeout)
4992 struct hci_conn_params *params;
4993 struct hci_conn *conn;
4994 struct smp_irk *irk;
4999 /* All controllers implicitly stop advertising in the event of a
5000 * connection, so ensure that the state bit is cleared.
5002 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5004 conn = hci_lookup_le_connect(hdev);
5006 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5008 bt_dev_err(hdev, "no memory for new connection");
5012 conn->dst_type = bdaddr_type;
5014 /* If we didn't have a hci_conn object previously
5015 * but we're in master role this must be something
5016 * initiated using a white list. Since white list based
5017 * connections are not "first class citizens" we don't
5018 * have full tracking of them. Therefore, we go ahead
5019 * with a "best effort" approach of determining the
5020 * initiator address based on the HCI_PRIVACY flag.
5023 conn->resp_addr_type = bdaddr_type;
5024 bacpy(&conn->resp_addr, bdaddr);
5025 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5026 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5027 bacpy(&conn->init_addr, &hdev->rpa);
5029 hci_copy_identity_address(hdev,
5031 &conn->init_addr_type);
/* Outgoing connection established (or failed): stop the LE
 * connection attempt timeout.
 */
5035 cancel_delayed_work(&conn->le_conn_timeout);
5039 /* Set the responder (our side) address type based on
5040 * the advertising address type.
5042 conn->resp_addr_type = hdev->adv_addr_type;
5043 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5044 /* In case of ext adv, resp_addr will be updated in
5045 * Adv Terminated event.
5047 if (!ext_adv_capable(hdev))
5048 bacpy(&conn->resp_addr, &hdev->random_addr);
5050 bacpy(&conn->resp_addr, &hdev->bdaddr);
5053 conn->init_addr_type = bdaddr_type;
5054 bacpy(&conn->init_addr, bdaddr);
5056 /* For incoming connections, set the default minimum
5057 * and maximum connection interval. They will be used
5058 * to check if the parameters are in range and if not
5059 * trigger the connection update procedure.
5061 conn->le_conn_min_interval = hdev->le_conn_min_interval;
5062 conn->le_conn_max_interval = hdev->le_conn_max_interval;
5065 /* Lookup the identity address from the stored connection
5066 * address and address type.
5068 * When establishing connections to an identity address, the
5069 * connection procedure will store the resolvable random
5070 * address first. Now if it can be converted back into the
5071 * identity address, start using the identity address from
5074 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5076 bacpy(&conn->dst, &irk->bdaddr);
5077 conn->dst_type = irk->addr_type;
5081 hci_le_conn_failed(conn, status);
/* Map the internal address type onto the bdaddr-list flavour used
 * by the block list lookup below.
 */
5085 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5086 addr_type = BDADDR_LE_PUBLIC;
5088 addr_type = BDADDR_LE_RANDOM;
5090 /* Drop the connection if the device is blocked */
5091 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
5092 hci_conn_drop(conn);
5096 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5097 mgmt_device_connected(hdev, conn, 0, NULL, 0);
5099 conn->sec_level = BT_SECURITY_LOW;
5100 conn->handle = handle;
5101 conn->state = BT_CONFIG;
5103 conn->le_conn_interval = interval;
5104 conn->le_conn_latency = latency;
5105 conn->le_supv_timeout = supervision_timeout;
5107 hci_debugfs_create_conn(conn);
5108 hci_conn_add_sysfs(conn);
5110 /* The remote features procedure is defined for master
5111 * role only. So only in case of an initiated connection
5112 * request the remote features.
5114 * If the local controller supports slave-initiated features
5115 * exchange, then requesting the remote features in slave
5116 * role is possible. Otherwise just transition into the
5117 * connected state without requesting the remote features.
5120 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
5121 struct hci_cp_le_read_remote_features cp;
5123 cp.handle = __cpu_to_le16(conn->handle);
5125 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5128 hci_conn_hold(conn);
5130 conn->state = BT_CONNECTED;
5131 hci_connect_cfm(conn, status);
/* The pending connection params entry that triggered this connect
 * (auto-connect) is no longer pending; drop its reference to conn.
 */
5134 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5137 list_del_init(&params->action);
5139 hci_conn_drop(params->conn);
5140 hci_conn_put(params->conn);
5141 params->conn = NULL;
5146 hci_update_background_scan(hdev);
5147 hci_dev_unlock(hdev);
/* Handle the legacy LE Connection Complete event: unpack the event
 * fields and delegate to the common le_conn_complete_evt() handler.
 */
5150 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5152 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5154 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5156 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5157 ev->role, le16_to_cpu(ev->handle),
5158 le16_to_cpu(ev->interval),
5159 le16_to_cpu(ev->latency),
5160 le16_to_cpu(ev->supervision_timeout));
/* Handle the LE Enhanced Connection Complete event: same delegation as
 * the legacy variant, via the common le_conn_complete_evt() handler.
 */
5163 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5164 struct sk_buff *skb)
5166 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5168 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5170 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5171 ev->role, le16_to_cpu(ev->handle),
5172 le16_to_cpu(ev->interval),
5173 le16_to_cpu(ev->latency),
5174 le16_to_cpu(ev->supervision_timeout));
/* Handle LE Advertising Set Terminated (extended advertising). When an
 * advertising set ended because a connection was created, fix up the
 * connection's responder address with the random address of the
 * advertising instance that was actually in use.
 */
5177 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5179 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5180 struct hci_conn *conn;
5182 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5187 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5189 struct adv_info *adv_instance;
5191 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
/* Instance 0 means the default set, which uses the controller's
 * configured random address.
 */
5194 if (!hdev->cur_adv_instance) {
5195 bacpy(&conn->resp_addr, &hdev->random_addr);
5199 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
5201 bacpy(&conn->resp_addr, &adv_instance->random_addr);
/* Handle LE Connection Update Complete: record the newly negotiated
 * interval, latency and supervision timeout on the connection.
 */
5205 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5206 struct sk_buff *skb)
5208 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5209 struct hci_conn *conn;
5211 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5218 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5220 conn->le_conn_interval = le16_to_cpu(ev->interval);
5221 conn->le_conn_latency = le16_to_cpu(ev->latency);
5222 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5225 hci_dev_unlock(hdev);
5228 /* This function requires the caller holds hdev->lock */
/* Check whether an incoming advertising report should trigger a
 * connection attempt to the advertiser (auto-connect / explicit
 * connect via the pend_le_conns list), and initiate it if so.
 * Returns the resulting hci_conn when a connection was started.
 */
5229 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5231 u8 addr_type, u8 adv_type,
5232 bdaddr_t *direct_rpa)
5234 struct hci_conn *conn;
5235 struct hci_conn_params *params;
5237 /* If the event is not connectable don't proceed further */
5238 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5241 /* Ignore if the device is blocked */
5242 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
5245 /* Most controller will fail if we try to create new connections
5246 * while we have an existing one in slave role.
5248 if (hdev->conn_hash.le_num_slave > 0)
5251 /* If we're not connectable only connect devices that we have in
5252 * our pend_le_conns list.
5254 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5259 if (!params->explicit_connect) {
5260 switch (params->auto_connect) {
5261 case HCI_AUTO_CONN_DIRECT:
5262 /* Only devices advertising with ADV_DIRECT_IND are
5263 * triggering a connection attempt. This is allowing
5264 * incoming connections from slave devices.
5266 if (adv_type != LE_ADV_DIRECT_IND)
5269 case HCI_AUTO_CONN_ALWAYS:
5270 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5271 * are triggering a connection attempt. This means
5272 * that incoming connectioms from slave device are
5273 * accepted and also outgoing connections to slave
5274 * devices are established when found.
5282 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5283 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
5285 if (!IS_ERR(conn)) {
5286 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5287 * by higher layer that tried to connect, if no then
5288 * store the pointer since we don't really have any
5289 * other owner of the object besides the params that
5290 * triggered it. This way we can abort the connection if
5291 * the parameters get removed and keep the reference
5292 * count consistent once the connection is established.
5295 if (!params->explicit_connect)
5296 params->conn = hci_conn_get(conn);
5301 switch (PTR_ERR(conn)) {
5303 /* If hci_connect() returns -EBUSY it means there is already
5304 * an LE connection attempt going on. Since controllers don't
5305 * support more than one connection attempt at the time, we
5306 * don't consider this an error case.
5310 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Core handler for a single LE advertising (or directed-advertising)
 * report: validates the PDU type and data length, resolves RPAs to
 * identity addresses, optionally triggers a pending connection, and
 * generates mgmt device-found events — merging ADV_IND/ADV_SCAN_IND
 * reports with their subsequent SCAN_RSP during active scanning.
 */
5317 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5318 u8 bdaddr_type, bdaddr_t *direct_addr,
5319 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
5321 struct discovery_state *d = &hdev->discovery;
5322 struct smp_irk *irk;
5323 struct hci_conn *conn;
5330 case LE_ADV_DIRECT_IND:
5331 case LE_ADV_SCAN_IND:
5332 case LE_ADV_NONCONN_IND:
5333 case LE_ADV_SCAN_RSP:
5336 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5337 "type: 0x%02x", type);
5341 /* Find the end of the data in case the report contains padded zero
5342 * bytes at the end causing an invalid length value.
5344 * When data is NULL, len is 0 so there is no need for extra ptr
5345 * check as 'ptr < data + 0' is already false in such case.
5347 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5348 if (ptr + 1 + *ptr > data + len)
5352 real_len = ptr - data;
5354 /* Adjust for actual length */
5355 if (len != real_len) {
5356 bt_dev_err_ratelimited(hdev, "advertising data len corrected");
5360 /* If the direct address is present, then this report is from
5361 * a LE Direct Advertising Report event. In that case it is
5362 * important to see if the address is matching the local
5363 * controller address.
5366 /* Only resolvable random addresses are valid for these
5367 * kind of reports and others can be ignored.
5369 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5372 /* If the controller is not using resolvable random
5373 * addresses, then this report can be ignored.
5375 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5378 /* If the local IRK of the controller does not match
5379 * with the resolvable random address provided, then
5380 * this report can be ignored.
5382 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5386 /* Check if we need to convert to identity address */
5387 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5389 bdaddr = &irk->bdaddr;
5390 bdaddr_type = irk->addr_type;
5393 /* Check if we have been requested to connect to this device.
5395 * direct_addr is set only for directed advertising reports (it is NULL
5396 * for advertising reports) and is already verified to be RPA above.
5398 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5400 if (conn && type == LE_ADV_IND) {
5401 /* Store report for later inclusion by
5402 * mgmt_device_connected
5404 memcpy(conn->le_adv_data, data, len);
5405 conn->le_adv_data_len = len;
5408 /* Passive scanning shouldn't trigger any device found events,
5409 * except for devices marked as CONN_REPORT for which we do send
5410 * device found events.
5412 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5413 if (type == LE_ADV_DIRECT_IND)
5416 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5417 bdaddr, bdaddr_type))
5420 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5421 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5424 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5425 rssi, flags, data, len, NULL, 0);
5429 /* When receiving non-connectable or scannable undirected
5430 * advertising reports, this means that the remote device is
5431 * not connectable and then clearly indicate this in the
5432 * device found event.
5434 * When receiving a scan response, then there is no way to
5435 * know if the remote device is connectable or not. However
5436 * since scan responses are merged with a previously seen
5437 * advertising report, the flags field from that report
5440 * In the really unlikely case that a controller get confused
5441 * and just sends a scan response event, then it is marked as
5442 * not connectable as well.
5444 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5445 type == LE_ADV_SCAN_RSP)
5446 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5450 /* If there's nothing pending either store the data from this
5451 * event or send an immediate device found event if the data
5452 * should not be stored for later.
5454 if (!has_pending_adv_report(hdev)) {
5455 /* If the report will trigger a SCAN_REQ store it for
5458 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5459 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5460 rssi, flags, data, len);
5464 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5465 rssi, flags, data, len, NULL, 0);
5469 /* Check if the pending report is for the same device as the new one */
5470 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5471 bdaddr_type == d->last_adv_addr_type);
5473 /* If the pending data doesn't match this report or this isn't a
5474 * scan response (e.g. we got a duplicate ADV_IND) then force
5475 * sending of the pending data.
5477 if (type != LE_ADV_SCAN_RSP || !match) {
5478 /* Send out whatever is in the cache, but skip duplicates */
5480 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5481 d->last_adv_addr_type, NULL,
5482 d->last_adv_rssi, d->last_adv_flags,
5484 d->last_adv_data_len, NULL, 0);
5486 /* If the new report will trigger a SCAN_REQ store it for
5489 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5490 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5491 rssi, flags, data, len);
5495 /* The advertising reports cannot be merged, so clear
5496 * the pending report and send out a device found event.
5498 clear_pending_adv_report(hdev);
5499 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5500 rssi, flags, data, len, NULL, 0);
5504 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5505 * the new event is a SCAN_RSP. We can therefore proceed with
5506 * sending a merged device found event.
5508 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5509 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5510 d->last_adv_data, d->last_adv_data_len, data, len);
5511 clear_pending_adv_report(hdev);
/* Handle LE Advertising Report: iterate over the per-event report
 * entries, validate each data length, extract the trailing RSSI byte
 * and hand each report to process_adv_report().
 */
5514 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5516 u8 num_reports = skb->data[0];
5517 void *ptr = &skb->data[1];
5521 while (num_reports--) {
5522 struct hci_ev_le_advertising_info *ev = ptr;
/* The RSSI byte immediately follows the variable-length data. */
5525 if (ev->length <= HCI_MAX_AD_LENGTH) {
5526 rssi = ev->data[ev->length];
5527 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5528 ev->bdaddr_type, NULL, 0, rssi,
5529 ev->data, ev->length);
5531 bt_dev_err(hdev, "Dropping invalid advertising data");
/* +1 accounts for the RSSI byte after the data. */
5534 ptr += sizeof(*ev) + ev->length + 1;
5537 hci_dev_unlock(hdev);
/* Map an extended advertising report event type (bit field) onto the
 * equivalent legacy advertising PDU type, so extended reports can be
 * funnelled through the common process_adv_report() path. Returns
 * LE_ADV_INVALID for unrecognized combinations.
 */
5540 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
5542 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5544 case LE_LEGACY_ADV_IND:
5546 case LE_LEGACY_ADV_DIRECT_IND:
5547 return LE_ADV_DIRECT_IND;
5548 case LE_LEGACY_ADV_SCAN_IND:
5549 return LE_ADV_SCAN_IND;
5550 case LE_LEGACY_NONCONN_IND:
5551 return LE_ADV_NONCONN_IND;
5552 case LE_LEGACY_SCAN_RSP_ADV:
5553 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5554 return LE_ADV_SCAN_RSP;
/* Non-legacy PDU: derive the closest legacy type from the
 * connectable/scannable/directed property bits.
 */
5560 if (evt_type & LE_EXT_ADV_CONN_IND) {
5561 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5562 return LE_ADV_DIRECT_IND;
5567 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5568 return LE_ADV_SCAN_RSP;
5570 if (evt_type & LE_EXT_ADV_SCAN_IND)
5571 return LE_ADV_SCAN_IND;
5573 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5574 evt_type & LE_EXT_ADV_DIRECT_IND)
5575 return LE_ADV_NONCONN_IND;
5578 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5581 return LE_ADV_INVALID;
/* Handle LE Extended Advertising Report: for each entry, translate the
 * extended event type to a legacy PDU type and feed valid reports to
 * process_adv_report() (the extended event carries RSSI in its own
 * field rather than after the data).
 */
5584 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5586 u8 num_reports = skb->data[0];
5587 void *ptr = &skb->data[1];
5591 while (num_reports--) {
5592 struct hci_ev_le_ext_adv_report *ev = ptr;
5596 evt_type = __le16_to_cpu(ev->evt_type);
5597 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
5598 if (legacy_evt_type != LE_ADV_INVALID) {
5599 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5600 ev->bdaddr_type, NULL, 0, ev->rssi,
5601 ev->data, ev->length);
5604 ptr += sizeof(*ev) + ev->length;
5607 hci_dev_unlock(hdev);
/* Handle LE Read Remote Features Complete: store the remote LE feature
 * page and, if the connection was still in BT_CONFIG, finish the
 * connection setup — tolerating the "unsupported remote feature"
 * error (0x1a) for slave-initiated exchanges.
 */
5610 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5611 struct sk_buff *skb)
5613 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5614 struct hci_conn *conn;
5616 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5620 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5623 memcpy(conn->features[0], ev->features, 8);
5625 if (conn->state == BT_CONFIG) {
5628 /* If the local controller supports slave-initiated
5629 * features exchange, but the remote controller does
5630 * not, then it is possible that the error code 0x1a
5631 * for unsupported remote feature gets returned.
5633 * In this specific case, allow the connection to
5634 * transition into connected state and mark it as
5637 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5638 !conn->out && ev->status == 0x1a)
5641 status = ev->status;
5643 conn->state = BT_CONNECTED;
5644 hci_connect_cfm(conn, status);
5645 hci_conn_drop(conn);
5649 hci_dev_unlock(hdev);
/* Handle LE Long Term Key Request: look up a stored LTK for the peer,
 * validate EDiv/Rand (both must be zero for SC keys, must match for
 * legacy keys), and reply with the key — or send a negative reply
 * when no usable key exists.
 */
5652 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5654 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5655 struct hci_cp_le_ltk_reply cp;
5656 struct hci_cp_le_ltk_neg_reply neg;
5657 struct hci_conn *conn;
5658 struct smp_ltk *ltk;
5660 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5664 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5668 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5672 if (smp_ltk_is_sc(ltk)) {
5673 /* With SC both EDiv and Rand are set to zero */
5674 if (ev->ediv || ev->rand)
5677 /* For non-SC keys check that EDiv and Rand match */
5678 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Copy the key, zero-padding beyond the negotiated key size. */
5682 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5683 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5684 cp.handle = cpu_to_le16(conn->handle);
5686 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5688 conn->enc_key_size = ltk->enc_size;
5690 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5692 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5693 * temporary key used to encrypt a connection following
5694 * pairing. It is used during the Encrypted Session Setup to
5695 * distribute the keys. Later, security can be re-established
5696 * using a distributed LTK.
5698 if (ltk->type == SMP_STK) {
5699 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
/* STKs are single-use: remove the key once consumed. */
5700 list_del_rcu(<k->list);
5701 kfree_rcu(ltk, rcu);
5703 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5706 hci_dev_unlock(hdev);
5711 neg.handle = ev->handle;
5712 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5713 hci_dev_unlock(hdev);
5716 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5719 struct hci_cp_le_conn_param_req_neg_reply cp;
5721 cp.handle = cpu_to_le16(handle);
5724 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5728 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5729 struct sk_buff *skb)
5731 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5732 struct hci_cp_le_conn_param_req_reply cp;
5733 struct hci_conn *hcon;
5734 u16 handle, min, max, latency, timeout;
5736 handle = le16_to_cpu(ev->handle);
5737 min = le16_to_cpu(ev->interval_min);
5738 max = le16_to_cpu(ev->interval_max);
5739 latency = le16_to_cpu(ev->latency);
5740 timeout = le16_to_cpu(ev->timeout);
5742 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5743 if (!hcon || hcon->state != BT_CONNECTED)
5744 return send_conn_param_neg_reply(hdev, handle,
5745 HCI_ERROR_UNKNOWN_CONN_ID);
5747 if (hci_check_conn_params(min, max, latency, timeout))
5748 return send_conn_param_neg_reply(hdev, handle,
5749 HCI_ERROR_INVALID_LL_PARAMS);
5751 if (hcon->role == HCI_ROLE_MASTER) {
5752 struct hci_conn_params *params;
5757 params = hci_conn_params_lookup(hdev, &hcon->dst,
5760 params->conn_min_interval = min;
5761 params->conn_max_interval = max;
5762 params->conn_latency = latency;
5763 params->supervision_timeout = timeout;
5769 hci_dev_unlock(hdev);
5771 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5772 store_hint, min, max, latency, timeout);
5775 cp.handle = ev->handle;
5776 cp.interval_min = ev->interval_min;
5777 cp.interval_max = ev->interval_max;
5778 cp.latency = ev->latency;
5779 cp.timeout = ev->timeout;
5783 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5786 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5787 struct sk_buff *skb)
5789 u8 num_reports = skb->data[0];
5790 void *ptr = &skb->data[1];
5794 while (num_reports--) {
5795 struct hci_ev_le_direct_adv_info *ev = ptr;
5797 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5798 ev->bdaddr_type, &ev->direct_addr,
5799 ev->direct_addr_type, ev->rssi, NULL, 0);
5804 hci_dev_unlock(hdev);
5807 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
5809 struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
5810 struct hci_conn *conn;
5812 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5819 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5823 conn->le_tx_phy = ev->tx_phy;
5824 conn->le_rx_phy = ev->rx_phy;
5827 hci_dev_unlock(hdev);
5830 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5832 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5834 skb_pull(skb, sizeof(*le_ev));
5836 switch (le_ev->subevent) {
5837 case HCI_EV_LE_CONN_COMPLETE:
5838 hci_le_conn_complete_evt(hdev, skb);
5841 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5842 hci_le_conn_update_complete_evt(hdev, skb);
5845 case HCI_EV_LE_ADVERTISING_REPORT:
5846 hci_le_adv_report_evt(hdev, skb);
5849 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5850 hci_le_remote_feat_complete_evt(hdev, skb);
5853 case HCI_EV_LE_LTK_REQ:
5854 hci_le_ltk_request_evt(hdev, skb);
5857 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5858 hci_le_remote_conn_param_req_evt(hdev, skb);
5861 case HCI_EV_LE_DIRECT_ADV_REPORT:
5862 hci_le_direct_adv_report_evt(hdev, skb);
5865 case HCI_EV_LE_PHY_UPDATE_COMPLETE:
5866 hci_le_phy_update_evt(hdev, skb);
5869 case HCI_EV_LE_EXT_ADV_REPORT:
5870 hci_le_ext_adv_report_evt(hdev, skb);
5873 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
5874 hci_le_enh_conn_complete_evt(hdev, skb);
5877 case HCI_EV_LE_EXT_ADV_SET_TERM:
5878 hci_le_ext_adv_term_evt(hdev, skb);
5886 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5887 u8 event, struct sk_buff *skb)
5889 struct hci_ev_cmd_complete *ev;
5890 struct hci_event_hdr *hdr;
5895 if (skb->len < sizeof(*hdr)) {
5896 bt_dev_err(hdev, "too short HCI event");
5900 hdr = (void *) skb->data;
5901 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5904 if (hdr->evt != event)
5909 /* Check if request ended in Command Status - no way to retreive
5910 * any extra parameters in this case.
5912 if (hdr->evt == HCI_EV_CMD_STATUS)
5915 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5916 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
5921 if (skb->len < sizeof(*ev)) {
5922 bt_dev_err(hdev, "too short cmd_complete event");
5926 ev = (void *) skb->data;
5927 skb_pull(skb, sizeof(*ev));
5929 if (opcode != __le16_to_cpu(ev->opcode)) {
5930 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5931 __le16_to_cpu(ev->opcode));
5938 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5940 struct hci_event_hdr *hdr = (void *) skb->data;
5941 hci_req_complete_t req_complete = NULL;
5942 hci_req_complete_skb_t req_complete_skb = NULL;
5943 struct sk_buff *orig_skb = NULL;
5944 u8 status = 0, event = hdr->evt, req_evt = 0;
5945 u16 opcode = HCI_OP_NOP;
5948 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
5952 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
5953 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5954 opcode = __le16_to_cpu(cmd_hdr->opcode);
5955 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
5960 /* If it looks like we might end up having to call
5961 * req_complete_skb, store a pristine copy of the skb since the
5962 * various handlers may modify the original one through
5963 * skb_pull() calls, etc.
5965 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5966 event == HCI_EV_CMD_COMPLETE)
5967 orig_skb = skb_clone(skb, GFP_KERNEL);
5969 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5972 case HCI_EV_INQUIRY_COMPLETE:
5973 hci_inquiry_complete_evt(hdev, skb);
5976 case HCI_EV_INQUIRY_RESULT:
5977 hci_inquiry_result_evt(hdev, skb);
5980 case HCI_EV_CONN_COMPLETE:
5981 hci_conn_complete_evt(hdev, skb);
5984 case HCI_EV_CONN_REQUEST:
5985 hci_conn_request_evt(hdev, skb);
5988 case HCI_EV_DISCONN_COMPLETE:
5989 hci_disconn_complete_evt(hdev, skb);
5992 case HCI_EV_AUTH_COMPLETE:
5993 hci_auth_complete_evt(hdev, skb);
5996 case HCI_EV_REMOTE_NAME:
5997 hci_remote_name_evt(hdev, skb);
6000 case HCI_EV_ENCRYPT_CHANGE:
6001 hci_encrypt_change_evt(hdev, skb);
6004 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
6005 hci_change_link_key_complete_evt(hdev, skb);
6008 case HCI_EV_REMOTE_FEATURES:
6009 hci_remote_features_evt(hdev, skb);
6012 case HCI_EV_CMD_COMPLETE:
6013 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
6014 &req_complete, &req_complete_skb);
6017 case HCI_EV_CMD_STATUS:
6018 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
6022 case HCI_EV_HARDWARE_ERROR:
6023 hci_hardware_error_evt(hdev, skb);
6026 case HCI_EV_ROLE_CHANGE:
6027 hci_role_change_evt(hdev, skb);
6030 case HCI_EV_NUM_COMP_PKTS:
6031 hci_num_comp_pkts_evt(hdev, skb);
6034 case HCI_EV_MODE_CHANGE:
6035 hci_mode_change_evt(hdev, skb);
6038 case HCI_EV_PIN_CODE_REQ:
6039 hci_pin_code_request_evt(hdev, skb);
6042 case HCI_EV_LINK_KEY_REQ:
6043 hci_link_key_request_evt(hdev, skb);
6046 case HCI_EV_LINK_KEY_NOTIFY:
6047 hci_link_key_notify_evt(hdev, skb);
6050 case HCI_EV_CLOCK_OFFSET:
6051 hci_clock_offset_evt(hdev, skb);
6054 case HCI_EV_PKT_TYPE_CHANGE:
6055 hci_pkt_type_change_evt(hdev, skb);
6058 case HCI_EV_PSCAN_REP_MODE:
6059 hci_pscan_rep_mode_evt(hdev, skb);
6062 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
6063 hci_inquiry_result_with_rssi_evt(hdev, skb);
6066 case HCI_EV_REMOTE_EXT_FEATURES:
6067 hci_remote_ext_features_evt(hdev, skb);
6070 case HCI_EV_SYNC_CONN_COMPLETE:
6071 hci_sync_conn_complete_evt(hdev, skb);
6074 case HCI_EV_EXTENDED_INQUIRY_RESULT:
6075 hci_extended_inquiry_result_evt(hdev, skb);
6078 case HCI_EV_KEY_REFRESH_COMPLETE:
6079 hci_key_refresh_complete_evt(hdev, skb);
6082 case HCI_EV_IO_CAPA_REQUEST:
6083 hci_io_capa_request_evt(hdev, skb);
6086 case HCI_EV_IO_CAPA_REPLY:
6087 hci_io_capa_reply_evt(hdev, skb);
6090 case HCI_EV_USER_CONFIRM_REQUEST:
6091 hci_user_confirm_request_evt(hdev, skb);
6094 case HCI_EV_USER_PASSKEY_REQUEST:
6095 hci_user_passkey_request_evt(hdev, skb);
6098 case HCI_EV_USER_PASSKEY_NOTIFY:
6099 hci_user_passkey_notify_evt(hdev, skb);
6102 case HCI_EV_KEYPRESS_NOTIFY:
6103 hci_keypress_notify_evt(hdev, skb);
6106 case HCI_EV_SIMPLE_PAIR_COMPLETE:
6107 hci_simple_pair_complete_evt(hdev, skb);
6110 case HCI_EV_REMOTE_HOST_FEATURES:
6111 hci_remote_host_features_evt(hdev, skb);
6114 case HCI_EV_LE_META:
6115 hci_le_meta_evt(hdev, skb);
6118 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6119 hci_remote_oob_data_request_evt(hdev, skb);
6122 #if IS_ENABLED(CONFIG_BT_HS)
6123 case HCI_EV_CHANNEL_SELECTED:
6124 hci_chan_selected_evt(hdev, skb);
6127 case HCI_EV_PHY_LINK_COMPLETE:
6128 hci_phy_link_complete_evt(hdev, skb);
6131 case HCI_EV_LOGICAL_LINK_COMPLETE:
6132 hci_loglink_complete_evt(hdev, skb);
6135 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6136 hci_disconn_loglink_complete_evt(hdev, skb);
6139 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6140 hci_disconn_phylink_complete_evt(hdev, skb);
6144 case HCI_EV_NUM_COMP_BLOCKS:
6145 hci_num_comp_blocks_evt(hdev, skb);
6149 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6154 req_complete(hdev, status, opcode);
6155 } else if (req_complete_skb) {
6156 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6157 kfree_skb(orig_skb);
6160 req_complete_skb(hdev, status, opcode, orig_skb);
6164 kfree_skb(orig_skb);
6166 hdev->stat.evt_rx++;