2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
37 /* Handle HCI Event packets */
39 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
41 __u8 status = *((__u8 *) skb->data);
43 BT_DBG("%s status 0x%2.2x", hdev->name, status);
48 clear_bit(HCI_INQUIRY, &hdev->flags);
49 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
50 wake_up_bit(&hdev->flags, HCI_INQUIRY);
53 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
56 hci_conn_check_pending(hdev);
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
61 __u8 status = *((__u8 *) skb->data);
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73 __u8 status = *((__u8 *) skb->data);
75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
80 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
82 hci_conn_check_pending(hdev);
85 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
88 BT_DBG("%s", hdev->name);
91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
93 struct hci_rp_role_discovery *rp = (void *) skb->data;
94 struct hci_conn *conn;
96 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
105 conn->role = rp->role;
107 hci_dev_unlock(hdev);
110 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
112 struct hci_rp_read_link_policy *rp = (void *) skb->data;
113 struct hci_conn *conn;
115 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
122 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
124 conn->link_policy = __le16_to_cpu(rp->policy);
126 hci_dev_unlock(hdev);
129 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
131 struct hci_rp_write_link_policy *rp = (void *) skb->data;
132 struct hci_conn *conn;
135 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
140 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
146 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
148 conn->link_policy = get_unaligned_le16(sent + 2);
150 hci_dev_unlock(hdev);
153 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
156 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
158 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
163 hdev->link_policy = __le16_to_cpu(rp->policy);
166 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
169 __u8 status = *((__u8 *) skb->data);
172 BT_DBG("%s status 0x%2.2x", hdev->name, status);
177 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
181 hdev->link_policy = get_unaligned_le16(sent);
184 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
186 __u8 status = *((__u8 *) skb->data);
188 BT_DBG("%s status 0x%2.2x", hdev->name, status);
190 clear_bit(HCI_RESET, &hdev->flags);
195 /* Reset all non-persistent flags */
196 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
198 hdev->discovery.state = DISCOVERY_STOPPED;
199 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
200 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
202 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
203 hdev->adv_data_len = 0;
205 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
206 hdev->scan_rsp_data_len = 0;
208 hdev->le_scan_type = LE_SCAN_PASSIVE;
210 hdev->ssp_debug_mode = 0;
212 hci_bdaddr_list_clear(&hdev->le_white_list);
215 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
217 __u8 status = *((__u8 *) skb->data);
220 BT_DBG("%s status 0x%2.2x", hdev->name, status);
222 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
228 if (test_bit(HCI_MGMT, &hdev->dev_flags))
229 mgmt_set_local_name_complete(hdev, sent, status);
231 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
233 hci_dev_unlock(hdev);
236 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
238 struct hci_rp_read_local_name *rp = (void *) skb->data;
240 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
245 if (test_bit(HCI_SETUP, &hdev->dev_flags))
246 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
249 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
251 __u8 status = *((__u8 *) skb->data);
254 BT_DBG("%s status 0x%2.2x", hdev->name, status);
256 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
261 __u8 param = *((__u8 *) sent);
263 if (param == AUTH_ENABLED)
264 set_bit(HCI_AUTH, &hdev->flags);
266 clear_bit(HCI_AUTH, &hdev->flags);
269 if (test_bit(HCI_MGMT, &hdev->dev_flags))
270 mgmt_auth_enable_complete(hdev, status);
273 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
275 __u8 status = *((__u8 *) skb->data);
279 BT_DBG("%s status 0x%2.2x", hdev->name, status);
284 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
288 param = *((__u8 *) sent);
291 set_bit(HCI_ENCRYPT, &hdev->flags);
293 clear_bit(HCI_ENCRYPT, &hdev->flags);
296 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
298 __u8 status = *((__u8 *) skb->data);
302 BT_DBG("%s status 0x%2.2x", hdev->name, status);
304 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
308 param = *((__u8 *) sent);
313 hdev->discov_timeout = 0;
317 if (param & SCAN_INQUIRY)
318 set_bit(HCI_ISCAN, &hdev->flags);
320 clear_bit(HCI_ISCAN, &hdev->flags);
322 if (param & SCAN_PAGE)
323 set_bit(HCI_PSCAN, &hdev->flags);
325 clear_bit(HCI_PSCAN, &hdev->flags);
328 hci_dev_unlock(hdev);
331 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
333 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
335 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
340 memcpy(hdev->dev_class, rp->dev_class, 3);
342 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
343 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
346 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
348 __u8 status = *((__u8 *) skb->data);
351 BT_DBG("%s status 0x%2.2x", hdev->name, status);
353 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
360 memcpy(hdev->dev_class, sent, 3);
362 if (test_bit(HCI_MGMT, &hdev->dev_flags))
363 mgmt_set_class_of_dev_complete(hdev, sent, status);
365 hci_dev_unlock(hdev);
368 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
370 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
373 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
378 setting = __le16_to_cpu(rp->voice_setting);
380 if (hdev->voice_setting == setting)
383 hdev->voice_setting = setting;
385 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
388 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
391 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
394 __u8 status = *((__u8 *) skb->data);
398 BT_DBG("%s status 0x%2.2x", hdev->name, status);
403 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
407 setting = get_unaligned_le16(sent);
409 if (hdev->voice_setting == setting)
412 hdev->voice_setting = setting;
414 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
417 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
420 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
423 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
425 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
430 hdev->num_iac = rp->num_iac;
432 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
435 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
437 __u8 status = *((__u8 *) skb->data);
438 struct hci_cp_write_ssp_mode *sent;
440 BT_DBG("%s status 0x%2.2x", hdev->name, status);
442 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
448 hdev->features[1][0] |= LMP_HOST_SSP;
450 hdev->features[1][0] &= ~LMP_HOST_SSP;
453 if (test_bit(HCI_MGMT, &hdev->dev_flags))
454 mgmt_ssp_enable_complete(hdev, sent->mode, status);
457 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
459 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
463 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
465 u8 status = *((u8 *) skb->data);
466 struct hci_cp_write_sc_support *sent;
468 BT_DBG("%s status 0x%2.2x", hdev->name, status);
470 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
476 hdev->features[1][0] |= LMP_HOST_SC;
478 hdev->features[1][0] &= ~LMP_HOST_SC;
481 if (test_bit(HCI_MGMT, &hdev->dev_flags))
482 mgmt_sc_enable_complete(hdev, sent->support, status);
485 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
487 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
491 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
493 struct hci_rp_read_local_version *rp = (void *) skb->data;
495 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
500 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
501 hdev->hci_ver = rp->hci_ver;
502 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
503 hdev->lmp_ver = rp->lmp_ver;
504 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
505 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
509 static void hci_cc_read_local_commands(struct hci_dev *hdev,
512 struct hci_rp_read_local_commands *rp = (void *) skb->data;
514 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
519 if (test_bit(HCI_SETUP, &hdev->dev_flags))
520 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
523 static void hci_cc_read_local_features(struct hci_dev *hdev,
526 struct hci_rp_read_local_features *rp = (void *) skb->data;
528 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
533 memcpy(hdev->features, rp->features, 8);
535 /* Adjust default settings according to features
536 * supported by device. */
538 if (hdev->features[0][0] & LMP_3SLOT)
539 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
541 if (hdev->features[0][0] & LMP_5SLOT)
542 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
544 if (hdev->features[0][1] & LMP_HV2) {
545 hdev->pkt_type |= (HCI_HV2);
546 hdev->esco_type |= (ESCO_HV2);
549 if (hdev->features[0][1] & LMP_HV3) {
550 hdev->pkt_type |= (HCI_HV3);
551 hdev->esco_type |= (ESCO_HV3);
554 if (lmp_esco_capable(hdev))
555 hdev->esco_type |= (ESCO_EV3);
557 if (hdev->features[0][4] & LMP_EV4)
558 hdev->esco_type |= (ESCO_EV4);
560 if (hdev->features[0][4] & LMP_EV5)
561 hdev->esco_type |= (ESCO_EV5);
563 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
564 hdev->esco_type |= (ESCO_2EV3);
566 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
567 hdev->esco_type |= (ESCO_3EV3);
569 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
570 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
573 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
576 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
578 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
583 if (hdev->max_page < rp->max_page)
584 hdev->max_page = rp->max_page;
586 if (rp->page < HCI_MAX_PAGES)
587 memcpy(hdev->features[rp->page], rp->features, 8);
590 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
593 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
595 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
600 hdev->flow_ctl_mode = rp->mode;
603 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
605 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
607 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
612 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
613 hdev->sco_mtu = rp->sco_mtu;
614 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
615 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
617 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
622 hdev->acl_cnt = hdev->acl_pkts;
623 hdev->sco_cnt = hdev->sco_pkts;
625 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
626 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
629 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
631 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
633 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
638 if (test_bit(HCI_INIT, &hdev->flags))
639 bacpy(&hdev->bdaddr, &rp->bdaddr);
641 if (test_bit(HCI_SETUP, &hdev->dev_flags))
642 bacpy(&hdev->setup_addr, &rp->bdaddr);
645 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
648 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
650 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
655 if (test_bit(HCI_INIT, &hdev->flags)) {
656 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
657 hdev->page_scan_window = __le16_to_cpu(rp->window);
661 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
664 u8 status = *((u8 *) skb->data);
665 struct hci_cp_write_page_scan_activity *sent;
667 BT_DBG("%s status 0x%2.2x", hdev->name, status);
672 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
676 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
677 hdev->page_scan_window = __le16_to_cpu(sent->window);
680 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
683 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
685 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
690 if (test_bit(HCI_INIT, &hdev->flags))
691 hdev->page_scan_type = rp->type;
694 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
697 u8 status = *((u8 *) skb->data);
700 BT_DBG("%s status 0x%2.2x", hdev->name, status);
705 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
707 hdev->page_scan_type = *type;
710 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
713 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
715 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
720 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
721 hdev->block_len = __le16_to_cpu(rp->block_len);
722 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
724 hdev->block_cnt = hdev->num_blocks;
726 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
727 hdev->block_cnt, hdev->block_len);
730 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
732 struct hci_rp_read_clock *rp = (void *) skb->data;
733 struct hci_cp_read_clock *cp;
734 struct hci_conn *conn;
736 BT_DBG("%s", hdev->name);
738 if (skb->len < sizeof(*rp))
746 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
750 if (cp->which == 0x00) {
751 hdev->clock = le32_to_cpu(rp->clock);
755 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
757 conn->clock = le32_to_cpu(rp->clock);
758 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
762 hci_dev_unlock(hdev);
765 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
768 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
770 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
775 hdev->amp_status = rp->amp_status;
776 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
777 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
778 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
779 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
780 hdev->amp_type = rp->amp_type;
781 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
782 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
783 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
784 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
787 a2mp_send_getinfo_rsp(hdev);
790 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
793 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
794 struct amp_assoc *assoc = &hdev->loc_assoc;
795 size_t rem_len, frag_len;
797 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
802 frag_len = skb->len - sizeof(*rp);
803 rem_len = __le16_to_cpu(rp->rem_len);
805 if (rem_len > frag_len) {
806 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
808 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
809 assoc->offset += frag_len;
811 /* Read other fragments */
812 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
817 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
818 assoc->len = assoc->offset + rem_len;
822 /* Send A2MP Rsp when all fragments are received */
823 a2mp_send_getampassoc_rsp(hdev, rp->status);
824 a2mp_send_create_phy_link_req(hdev, rp->status);
827 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
830 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
832 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
837 hdev->inq_tx_power = rp->tx_power;
840 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
842 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
843 struct hci_cp_pin_code_reply *cp;
844 struct hci_conn *conn;
846 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
850 if (test_bit(HCI_MGMT, &hdev->dev_flags))
851 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
856 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
860 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
862 conn->pin_length = cp->pin_len;
865 hci_dev_unlock(hdev);
868 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
870 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
872 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
876 if (test_bit(HCI_MGMT, &hdev->dev_flags))
877 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
880 hci_dev_unlock(hdev);
883 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
886 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
888 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
893 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
894 hdev->le_pkts = rp->le_max_pkt;
896 hdev->le_cnt = hdev->le_pkts;
898 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
901 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
904 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
906 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
911 memcpy(hdev->le_features, rp->features, 8);
914 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
917 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
919 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
924 hdev->adv_tx_power = rp->tx_power;
927 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
929 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
931 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
935 if (test_bit(HCI_MGMT, &hdev->dev_flags))
936 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
939 hci_dev_unlock(hdev);
942 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
945 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
947 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
951 if (test_bit(HCI_MGMT, &hdev->dev_flags))
952 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
953 ACL_LINK, 0, rp->status);
955 hci_dev_unlock(hdev);
958 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
960 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
962 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
966 if (test_bit(HCI_MGMT, &hdev->dev_flags))
967 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
970 hci_dev_unlock(hdev);
973 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
976 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
978 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
982 if (test_bit(HCI_MGMT, &hdev->dev_flags))
983 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
984 ACL_LINK, 0, rp->status);
986 hci_dev_unlock(hdev);
989 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
992 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
994 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
997 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
999 hci_dev_unlock(hdev);
1002 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1003 struct sk_buff *skb)
1005 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1007 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1010 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
1011 rp->hash256, rp->rand256,
1013 hci_dev_unlock(hdev);
1017 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1019 __u8 status = *((__u8 *) skb->data);
1022 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1027 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1033 bacpy(&hdev->random_addr, sent);
1035 hci_dev_unlock(hdev);
1038 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1040 __u8 *sent, status = *((__u8 *) skb->data);
1042 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1047 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1053 /* If we're doing connection initiation as peripheral. Set a
1054 * timeout in case something goes wrong.
1057 struct hci_conn *conn;
1059 set_bit(HCI_LE_ADV, &hdev->dev_flags);
1061 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1063 queue_delayed_work(hdev->workqueue,
1064 &conn->le_conn_timeout,
1065 conn->conn_timeout);
1067 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1070 hci_dev_unlock(hdev);
1073 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1075 struct hci_cp_le_set_scan_param *cp;
1076 __u8 status = *((__u8 *) skb->data);
1078 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1083 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1089 hdev->le_scan_type = cp->type;
1091 hci_dev_unlock(hdev);
1094 static bool has_pending_adv_report(struct hci_dev *hdev)
1096 struct discovery_state *d = &hdev->discovery;
1098 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1101 static void clear_pending_adv_report(struct hci_dev *hdev)
1103 struct discovery_state *d = &hdev->discovery;
1105 bacpy(&d->last_adv_addr, BDADDR_ANY);
1106 d->last_adv_data_len = 0;
1109 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1110 u8 bdaddr_type, s8 rssi, u32 flags,
1113 struct discovery_state *d = &hdev->discovery;
1115 bacpy(&d->last_adv_addr, bdaddr);
1116 d->last_adv_addr_type = bdaddr_type;
1117 d->last_adv_rssi = rssi;
1118 d->last_adv_flags = flags;
1119 memcpy(d->last_adv_data, data, len);
1120 d->last_adv_data_len = len;
1123 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1124 struct sk_buff *skb)
1126 struct hci_cp_le_set_scan_enable *cp;
1127 __u8 status = *((__u8 *) skb->data);
1129 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1134 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1138 switch (cp->enable) {
1139 case LE_SCAN_ENABLE:
1140 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1141 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1142 clear_pending_adv_report(hdev);
1145 case LE_SCAN_DISABLE:
1146 /* We do this here instead of when setting DISCOVERY_STOPPED
1147 * since the latter would potentially require waiting for
1148 * inquiry to stop too.
1150 if (has_pending_adv_report(hdev)) {
1151 struct discovery_state *d = &hdev->discovery;
1153 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1154 d->last_adv_addr_type, NULL,
1155 d->last_adv_rssi, d->last_adv_flags,
1157 d->last_adv_data_len, NULL, 0);
1160 /* Cancel this timer so that we don't try to disable scanning
1161 * when it's already disabled.
1163 cancel_delayed_work(&hdev->le_scan_disable);
1165 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1167 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1168 * interrupted scanning due to a connect request. Mark
1169 * therefore discovery as stopped. If this was not
1170 * because of a connect request advertising might have
1171 * been disabled because of active scanning, so
1172 * re-enable it again if necessary.
1174 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1176 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1177 else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
1178 hdev->discovery.state == DISCOVERY_FINDING)
1179 mgmt_reenable_advertising(hdev);
1184 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1189 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1190 struct sk_buff *skb)
1192 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1194 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1199 hdev->le_white_list_size = rp->size;
1202 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1203 struct sk_buff *skb)
1205 __u8 status = *((__u8 *) skb->data);
1207 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1212 hci_bdaddr_list_clear(&hdev->le_white_list);
1215 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1216 struct sk_buff *skb)
1218 struct hci_cp_le_add_to_white_list *sent;
1219 __u8 status = *((__u8 *) skb->data);
1221 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1226 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1230 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1234 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1235 struct sk_buff *skb)
1237 struct hci_cp_le_del_from_white_list *sent;
1238 __u8 status = *((__u8 *) skb->data);
1240 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1245 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1249 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1253 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1254 struct sk_buff *skb)
1256 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1258 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1263 memcpy(hdev->le_states, rp->le_states, 8);
1266 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1267 struct sk_buff *skb)
1269 struct hci_cp_write_le_host_supported *sent;
1270 __u8 status = *((__u8 *) skb->data);
1272 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1277 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1282 hdev->features[1][0] |= LMP_HOST_LE;
1283 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1285 hdev->features[1][0] &= ~LMP_HOST_LE;
1286 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1287 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1291 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1293 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1296 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1298 struct hci_cp_le_set_adv_param *cp;
1299 u8 status = *((u8 *) skb->data);
1301 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1306 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1311 hdev->adv_addr_type = cp->own_address_type;
1312 hci_dev_unlock(hdev);
1315 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1316 struct sk_buff *skb)
1318 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1320 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1321 hdev->name, rp->status, rp->phy_handle);
1326 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1329 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1331 struct hci_rp_read_rssi *rp = (void *) skb->data;
1332 struct hci_conn *conn;
1334 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1341 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1343 conn->rssi = rp->rssi;
1345 hci_dev_unlock(hdev);
1348 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1350 struct hci_cp_read_tx_power *sent;
1351 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1352 struct hci_conn *conn;
1354 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1359 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1365 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1369 switch (sent->type) {
1371 conn->tx_power = rp->tx_power;
1374 conn->max_tx_power = rp->tx_power;
1379 hci_dev_unlock(hdev);
1382 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1384 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1387 hci_conn_check_pending(hdev);
1391 set_bit(HCI_INQUIRY, &hdev->flags);
1394 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1396 struct hci_cp_create_conn *cp;
1397 struct hci_conn *conn;
1399 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1401 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1407 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1409 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1412 if (conn && conn->state == BT_CONNECT) {
1413 if (status != 0x0c || conn->attempt > 2) {
1414 conn->state = BT_CLOSED;
1415 hci_proto_connect_cfm(conn, status);
1418 conn->state = BT_CONNECT2;
1422 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1425 BT_ERR("No memory for new connection");
1429 hci_dev_unlock(hdev);
1432 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1434 struct hci_cp_add_sco *cp;
1435 struct hci_conn *acl, *sco;
1438 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1443 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1447 handle = __le16_to_cpu(cp->handle);
1449 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1453 acl = hci_conn_hash_lookup_handle(hdev, handle);
1457 sco->state = BT_CLOSED;
1459 hci_proto_connect_cfm(sco, status);
1464 hci_dev_unlock(hdev);
1467 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1469 struct hci_cp_auth_requested *cp;
1470 struct hci_conn *conn;
1472 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1477 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1483 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1485 if (conn->state == BT_CONFIG) {
1486 hci_proto_connect_cfm(conn, status);
1487 hci_conn_drop(conn);
1491 hci_dev_unlock(hdev);
1494 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1496 struct hci_cp_set_conn_encrypt *cp;
1497 struct hci_conn *conn;
1499 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1504 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1510 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1512 if (conn->state == BT_CONFIG) {
1513 hci_proto_connect_cfm(conn, status);
1514 hci_conn_drop(conn);
1518 hci_dev_unlock(hdev);
1521 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1522 struct hci_conn *conn)
1524 if (conn->state != BT_CONFIG || !conn->out)
1527 if (conn->pending_sec_level == BT_SECURITY_SDP)
1530 /* Only request authentication for SSP connections or non-SSP
1531 * devices with sec_level MEDIUM or HIGH or if MITM protection
1534 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1535 conn->pending_sec_level != BT_SECURITY_FIPS &&
1536 conn->pending_sec_level != BT_SECURITY_HIGH &&
1537 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue an HCI Remote Name Request for an inquiry cache entry, copying
 * the page-scan parameters and clock offset recorded during inquiry.
 * Returns the result of hci_send_cmd() (0 on success).
 */
1543 static int hci_resolve_name(struct hci_dev *hdev,
1544 struct inquiry_entry *e)
1546 struct hci_cp_remote_name_req cp;
1548 memset(&cp, 0, sizeof(cp));
1550 bacpy(&cp.bdaddr, &e->data.bdaddr);
1551 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1552 cp.pscan_mode = e->data.pscan_mode;
1553 cp.clock_offset = e->data.clock_offset;
1555 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next discovered device that still
 * needs its name (NAME_NEEDED), marking it NAME_PENDING on success.
 * Returns true when a resolution was started, false when the resolve
 * list is empty or no request could be sent (return lines elided here).
 */
1558 static bool hci_resolve_next_name(struct hci_dev *hdev)
1560 struct discovery_state *discov = &hdev->discovery;
1561 struct inquiry_entry *e;
1563 if (list_empty(&discov->resolve))
1566 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1570 if (hci_resolve_name(hdev, e) == 0) {
1571 e->name_state = NAME_PENDING;
/* Handle completion (or failure) of a remote name lookup during discovery:
 * report the device as connected to mgmt when appropriate, record the
 * resolved name (or mark it unknown), and either continue with the next
 * pending name or finish discovery (DISCOVERY_STOPPED).
 * @name/@name_len: resolved name, or NULL/0 when the lookup failed.
 */
1578 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1579 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1581 struct discovery_state *discov = &hdev->discovery;
1582 struct inquiry_entry *e;
1584 /* Update the mgmt connected state if necessary. Be careful with
1585 * conn objects that exist but are not (yet) connected however.
1586 * Only those in BT_CONFIG or BT_CONNECTED states can be
1587 * considered connected.
1590 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1591 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1592 mgmt_device_connected(hdev, conn, 0, name, name_len);
1594 if (discov->state == DISCOVERY_STOPPED)
1597 if (discov->state == DISCOVERY_STOPPING)
1598 goto discov_complete;
1600 if (discov->state != DISCOVERY_RESOLVING)
1603 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1604 /* If the device was not found in a list of found devices names of which
1605 * are pending. there is no need to continue resolving a next name as it
1606 * will be done upon receiving another Remote Name Request Complete
1613 e->name_state = NAME_KNOWN;
1614 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1615 e->data.rssi, name, name_len);
1617 e->name_state = NAME_NOT_KNOWN;
1620 if (hci_resolve_next_name(hdev))
1624 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.
 * On success the work is deferred to the Remote Name Request Complete
 * event; on failure the pending-name state machine is advanced and, if
 * an outgoing connection still needs it, authentication is initiated.
 * NOTE(review): the success early-return and NULL guards are elided in
 * this extraction — verify against the full file.
 */
1627 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1629 struct hci_cp_remote_name_req *cp;
1630 struct hci_conn *conn;
1632 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1634 /* If successful wait for the name req complete event before
1635 * checking for the need to do authentication */
1639 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1645 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1647 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1648 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1653 if (!hci_outgoing_auth_needed(hdev, conn))
1656 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1657 struct hci_cp_auth_requested auth_cp;
1659 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1661 auth_cp.handle = __cpu_to_le16(conn->handle);
1662 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1663 sizeof(auth_cp), &auth_cp);
1667 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_READ_REMOTE_FEATURES.
 * On failure, complete the connection setup with the error status for
 * connections still in BT_CONFIG and drop the reference.
 * NOTE(review): guard lines and hci_dev_lock() elided in this extraction.
 */
1670 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1672 struct hci_cp_read_remote_features *cp;
1673 struct hci_conn *conn;
1675 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1680 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1686 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1688 if (conn->state == BT_CONFIG) {
1689 hci_proto_connect_cfm(conn, status);
1690 hci_conn_drop(conn);
1694 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_READ_REMOTE_EXT_FEATURES.
 * Mirrors hci_cs_read_remote_features(): on failure, finish connection
 * setup with the error for BT_CONFIG connections and drop the reference.
 * NOTE(review): guard lines and hci_dev_lock() elided in this extraction.
 */
1697 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1699 struct hci_cp_read_remote_ext_features *cp;
1700 struct hci_conn *conn;
1702 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1707 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1713 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1715 if (conn->state == BT_CONFIG) {
1716 hci_proto_connect_cfm(conn, status);
1717 hci_conn_drop(conn);
1721 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SETUP_SYNC_CONN.
 * On failure, look up the ACL link the sync connection was requested on,
 * close the associated SCO/eSCO link and report the failure upstream.
 * NOTE(review): the acl->link dereference that yields 'sco' is elided in
 * this extraction — verify against the full file.
 */
1724 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1726 struct hci_cp_setup_sync_conn *cp;
1727 struct hci_conn *acl, *sco;
1730 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1735 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1739 handle = __le16_to_cpu(cp->handle);
1741 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1745 acl = hci_conn_hash_lookup_handle(hdev, handle);
1749 sco->state = BT_CLOSED;
1751 hci_proto_connect_cfm(sco, status);
1756 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SNIFF_MODE.
 * Clears the pending mode-change flag for the target connection and, if
 * a SCO setup was waiting on the mode change, runs it with the status.
 */
1759 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1761 struct hci_cp_sniff_mode *cp;
1762 struct hci_conn *conn;
1764 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1769 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1775 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1777 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1779 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1780 hci_sco_setup(conn, status);
1783 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_EXIT_SNIFF_MODE.
 * Identical shape to hci_cs_sniff_mode(): clear the pending mode-change
 * flag and run any deferred SCO setup with the status.
 */
1786 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1788 struct hci_cp_exit_sniff_mode *cp;
1789 struct hci_conn *conn;
1791 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1796 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1802 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1804 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1806 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1807 hci_sco_setup(conn, status);
1810 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_DISCONNECT.
 * On failure, tell mgmt that the requested disconnect could not be
 * performed for the affected connection.
 * NOTE(review): status/cp/conn guards elided in this extraction.
 */
1813 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1815 struct hci_cp_disconnect *cp;
1816 struct hci_conn *conn;
1821 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1827 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1829 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1830 conn->dst_type, status);
1832 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_CREATE_PHY_LINK (AMP).
 * On success, continue AMP setup by writing the remote assoc for the
 * physical link handle; the failure path (looking up and cleaning up the
 * hcon) is only partially visible in this extraction — verify upstream.
 */
1835 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1837 struct hci_cp_create_phy_link *cp;
1839 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1841 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1848 struct hci_conn *hcon;
1850 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1854 amp_write_remote_assoc(hdev, cp->phy_handle);
1857 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_ACCEPT_PHY_LINK (AMP).
 * On success, continue the AMP association exchange for the accepted
 * physical link handle.
 */
1860 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1862 struct hci_cp_accept_phy_link *cp;
1864 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1869 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1873 amp_write_remote_assoc(hdev, cp->phy_handle);
/* Command Status handler for HCI_OP_LE_CREATE_CONN.
 * Failures are handled elsewhere (hci_le_conn_failed via the request
 * completion callbacks); here we record the initiator/responder address
 * info needed by SMP and arm a connection timeout for directed (non
 * white-list) connection attempts.
 */
1876 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1878 struct hci_cp_le_create_conn *cp;
1879 struct hci_conn *conn;
1881 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1883 /* All connection failure handling is taken care of by the
1884 * hci_le_conn_failed function which is triggered by the HCI
1885 * request completion callbacks used for connecting.
1890 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1896 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1900 /* Store the initiator and responder address information which
1901 * is needed for SMP. These values will not change during the
1902 * lifetime of the connection.
1904 conn->init_addr_type = cp->own_address_type;
1905 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
1906 bacpy(&conn->init_addr, &hdev->random_addr);
1908 bacpy(&conn->init_addr, &hdev->bdaddr);
1910 conn->resp_addr_type = cp->peer_addr_type;
1911 bacpy(&conn->resp_addr, &cp->peer_addr);
1913 /* We don't want the connection attempt to stick around
1914 * indefinitely since LE doesn't have a page timeout concept
1915 * like BR/EDR. Set a timer for any connection that doesn't use
1916 * the white list for connecting.
1918 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1919 queue_delayed_work(conn->hdev->workqueue,
1920 &conn->le_conn_timeout,
1921 conn->conn_timeout);
1924 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_LE_START_ENC.
 * If starting LE encryption failed on a live (BT_CONNECTED) link, tear
 * the link down with an authentication-failure reason.
 */
1927 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1929 struct hci_cp_le_start_enc *cp;
1930 struct hci_conn *conn;
1932 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1939 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1943 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1947 if (conn->state != BT_CONNECTED)
1950 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1951 hci_conn_drop(conn);
1954 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SWITCH_ROLE.
 * On failure, clear the pending role-switch flag so later role-switch
 * attempts are not blocked.
 */
1957 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
1959 struct hci_cp_switch_role *cp;
1960 struct hci_conn *conn;
1962 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1967 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
1973 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1975 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
1977 hci_dev_unlock(hdev);
/* Inquiry Complete event handler.
 * Clears HCI_INQUIRY and wakes anyone waiting on that bit, then (for
 * mgmt-controlled devices in DISCOVERY_FINDING) either starts resolving
 * the names of discovered devices or stops discovery when there is
 * nothing left to resolve.
 */
1980 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1982 __u8 status = *((__u8 *) skb->data);
1983 struct discovery_state *discov = &hdev->discovery;
1984 struct inquiry_entry *e;
1986 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1988 hci_conn_check_pending(hdev);
1990 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1993 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
1994 wake_up_bit(&hdev->flags, HCI_INQUIRY);
1996 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2001 if (discov->state != DISCOVERY_FINDING)
2004 if (list_empty(&discov->resolve)) {
2005 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2009 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2010 if (e && hci_resolve_name(hdev, e) == 0) {
2011 e->name_state = NAME_PENDING;
2012 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2014 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2018 hci_dev_unlock(hdev);
/* Inquiry Result event handler.
 * Iterates over the responses in the event (count in the first byte of
 * skb->data, entries following it), updates the inquiry cache, and
 * reports each found device to mgmt. Periodic-inquiry results are
 * skipped. SSP mode is unknown from this event, hence ssp_mode = 0x00.
 */
2021 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2023 struct inquiry_data data;
2024 struct inquiry_info *info = (void *) (skb->data + 1);
2025 int num_rsp = *((__u8 *) skb->data);
2027 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2032 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2037 for (; num_rsp; num_rsp--, info++) {
2040 bacpy(&data.bdaddr, &info->bdaddr);
2041 data.pscan_rep_mode = info->pscan_rep_mode;
2042 data.pscan_period_mode = info->pscan_period_mode;
2043 data.pscan_mode = info->pscan_mode;
2044 memcpy(data.dev_class, info->dev_class, 3);
2045 data.clock_offset = info->clock_offset;
2047 data.ssp_mode = 0x00;
2049 flags = hci_inquiry_cache_update(hdev, &data, false);
2051 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2052 info->dev_class, 0, flags, NULL, 0, NULL, 0);
2055 hci_dev_unlock(hdev);
/* Connection Complete event handler.
 * Locates the hci_conn for the event (falling back from SCO to a pending
 * ESCO lookup), and on success moves it to BT_CONFIG/BT_CONNECTED, sets
 * auth/encrypt flags from the adapter state, kicks off remote feature
 * reads for ACL links and fixes the packet type for pre-2.0 incoming
 * connections. On error it closes the conn and reports the failure.
 * NOTE(review): several branch/guard lines (ev->status checks, unlock
 * label) are elided in this extraction — statement grouping below may
 * not reflect the full control flow.
 */
2058 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2060 struct hci_ev_conn_complete *ev = (void *) skb->data;
2061 struct hci_conn *conn;
2063 BT_DBG("%s", hdev->name);
2067 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2069 if (ev->link_type != SCO_LINK)
2072 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2076 conn->type = SCO_LINK;
2080 conn->handle = __le16_to_cpu(ev->handle);
2082 if (conn->type == ACL_LINK) {
2083 conn->state = BT_CONFIG;
2084 hci_conn_hold(conn);
2086 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2087 !hci_find_link_key(hdev, &ev->bdaddr))
2088 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2090 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2092 conn->state = BT_CONNECTED;
2094 hci_conn_add_sysfs(conn);
2096 if (test_bit(HCI_AUTH, &hdev->flags))
2097 set_bit(HCI_CONN_AUTH, &conn->flags);
2099 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2100 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2102 /* Get remote features */
2103 if (conn->type == ACL_LINK) {
2104 struct hci_cp_read_remote_features cp;
2105 cp.handle = ev->handle;
2106 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2109 hci_update_page_scan(hdev, NULL);
2112 /* Set packet type for incoming connection */
2113 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2114 struct hci_cp_change_conn_ptype cp;
2115 cp.handle = ev->handle;
2116 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2117 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2121 conn->state = BT_CLOSED;
2122 if (conn->type == ACL_LINK)
2123 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2124 conn->dst_type, ev->status);
2127 if (conn->type == ACL_LINK)
2128 hci_sco_setup(conn, ev->status);
2131 hci_proto_connect_cfm(conn, ev->status);
2133 } else if (ev->link_type != ACL_LINK)
2134 hci_proto_connect_cfm(conn, ev->status);
2137 hci_dev_unlock(hdev);
2139 hci_conn_check_pending(hdev);
/* Reject an incoming connection request from @bdaddr by sending
 * HCI_OP_REJECT_CONN_REQ with reason HCI_ERROR_REJ_BAD_ADDR.
 */
2142 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2144 struct hci_cp_reject_conn_req cp;
2146 bacpy(&cp.bdaddr, bdaddr);
2147 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2148 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event handler.
 * Applies the accept policy: reject when the L2CAP layer doesn't accept,
 * when the peer is blacklisted, or when the adapter is non-connectable
 * and the peer is not whitelisted. Otherwise updates the inquiry cache
 * class, finds or allocates the hci_conn, and either accepts the request
 * (ACL or immediate sync), accepts with SCO parameters, or defers to the
 * protocol layer (BT_CONNECT2) when HCI_PROTO_DEFER is set.
 */
2151 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2153 struct hci_ev_conn_request *ev = (void *) skb->data;
2154 int mask = hdev->link_mode;
2155 struct inquiry_entry *ie;
2156 struct hci_conn *conn;
2159 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2162 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2165 if (!(mask & HCI_LM_ACCEPT)) {
2166 hci_reject_conn(hdev, &ev->bdaddr);
2170 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2172 hci_reject_conn(hdev, &ev->bdaddr);
2176 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
2177 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2179 hci_reject_conn(hdev, &ev->bdaddr);
2183 /* Connection accepted */
2187 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2189 memcpy(ie->data.dev_class, ev->dev_class, 3);
2191 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2194 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2197 BT_ERR("No memory for new connection");
2198 hci_dev_unlock(hdev);
2203 memcpy(conn->dev_class, ev->dev_class, 3);
2205 hci_dev_unlock(hdev);
2207 if (ev->link_type == ACL_LINK ||
2208 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2209 struct hci_cp_accept_conn_req cp;
2210 conn->state = BT_CONNECT;
2212 bacpy(&cp.bdaddr, &ev->bdaddr);
2214 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2215 cp.role = 0x00; /* Become master */
2217 cp.role = 0x01; /* Remain slave */
2219 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2220 } else if (!(flags & HCI_PROTO_DEFER)) {
2221 struct hci_cp_accept_sync_conn_req cp;
2222 conn->state = BT_CONNECT;
2224 bacpy(&cp.bdaddr, &ev->bdaddr);
2225 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2227 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2228 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2229 cp.max_latency = cpu_to_le16(0xffff);
2230 cp.content_format = cpu_to_le16(hdev->voice_setting);
2231 cp.retrans_effort = 0xff;
2233 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2236 conn->state = BT_CONNECT2;
2237 hci_proto_connect_cfm(conn, 0);
/* Map an HCI disconnect reason code to the coarser mgmt disconnect
 * reason reported to userspace; unrecognized codes map to
 * MGMT_DEV_DISCONN_UNKNOWN.
 */
2241 static u8 hci_to_mgmt_reason(u8 err)
2244 case HCI_ERROR_CONNECTION_TIMEOUT:
2245 return MGMT_DEV_DISCONN_TIMEOUT;
2246 case HCI_ERROR_REMOTE_USER_TERM:
2247 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2248 case HCI_ERROR_REMOTE_POWER_OFF:
2249 return MGMT_DEV_DISCONN_REMOTE;
2250 case HCI_ERROR_LOCAL_HOST_TERM:
2251 return MGMT_DEV_DISCONN_LOCAL_HOST;
2253 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event handler.
 * On failure, reports a failed disconnect to mgmt. On success, moves the
 * conn to BT_CLOSED, notifies mgmt of the disconnection, performs ACL
 * cleanup (link key removal, page-scan update), re-queues auto-connect
 * parameters so the device will be reconnected when appropriate, informs
 * the protocol layer, and re-enables LE advertising if the disconnected
 * link was LE (per the LE_Set_Advertise_Enable spec text below).
 */
2257 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2259 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2260 u8 reason = hci_to_mgmt_reason(ev->reason);
2261 struct hci_conn_params *params;
2262 struct hci_conn *conn;
2263 bool mgmt_connected;
2266 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2270 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2275 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2276 conn->dst_type, ev->status);
2280 conn->state = BT_CLOSED;
2282 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2283 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2284 reason, mgmt_connected);
2286 if (conn->type == ACL_LINK) {
2287 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2288 hci_remove_link_key(hdev, &conn->dst);
2290 hci_update_page_scan(hdev, NULL);
2293 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2295 switch (params->auto_connect) {
2296 case HCI_AUTO_CONN_LINK_LOSS:
2297 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2301 case HCI_AUTO_CONN_DIRECT:
2302 case HCI_AUTO_CONN_ALWAYS:
2303 list_del_init(&params->action);
2304 list_add(&params->action, &hdev->pend_le_conns);
2305 hci_update_background_scan(hdev);
2315 hci_proto_disconn_cfm(conn, ev->reason);
2318 /* Re-enable advertising if necessary, since it might
2319 * have been disabled by the connection. From the
2320 * HCI_LE_Set_Advertise_Enable command description in
2321 * the core specification (v4.0):
2322 * "The Controller shall continue advertising until the Host
2323 * issues an LE_Set_Advertise_Enable command with
2324 * Advertising_Enable set to 0x00 (Advertising is disabled)
2325 * or until a connection is created or until the Advertising
2326 * is timed out due to Directed Advertising."
2328 if (type == LE_LINK)
2329 mgmt_reenable_advertising(hdev);
2332 hci_dev_unlock(hdev);
/* Authentication Complete event handler.
 * Updates auth state for the connection: sets HCI_CONN_AUTH and raises
 * the security level on success (with a special case for impossible
 * legacy re-auth), reports failures to mgmt, clears the pending-auth
 * flags, and then either continues connection setup with encryption
 * (SSP), completes the connection, or — for already-established links —
 * notifies waiters and fires any pending encryption change.
 */
2335 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2337 struct hci_ev_auth_complete *ev = (void *) skb->data;
2338 struct hci_conn *conn;
2340 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2344 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2349 if (!hci_conn_ssp_enabled(conn) &&
2350 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2351 BT_INFO("re-auth of legacy device is not possible.");
2353 set_bit(HCI_CONN_AUTH, &conn->flags);
2354 conn->sec_level = conn->pending_sec_level;
2357 mgmt_auth_failed(conn, ev->status);
2360 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2361 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2363 if (conn->state == BT_CONFIG) {
2364 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2365 struct hci_cp_set_conn_encrypt cp;
2366 cp.handle = ev->handle;
2368 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2371 conn->state = BT_CONNECTED;
2372 hci_proto_connect_cfm(conn, ev->status);
2373 hci_conn_drop(conn);
2376 hci_auth_cfm(conn, ev->status);
2378 hci_conn_hold(conn);
2379 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2380 hci_conn_drop(conn);
2383 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2385 struct hci_cp_set_conn_encrypt cp;
2386 cp.handle = ev->handle;
2388 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2391 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2392 hci_encrypt_cfm(conn, ev->status, 0x00);
2397 hci_dev_unlock(hdev);
/* Remote Name Request Complete event handler.
 * Feeds the resolved (or failed) name into the discovery name-resolution
 * state machine for mgmt-controlled devices and, for an outgoing
 * connection that still requires it, initiates authentication.
 */
2400 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2402 struct hci_ev_remote_name *ev = (void *) skb->data;
2403 struct hci_conn *conn;
2405 BT_DBG("%s", hdev->name);
2407 hci_conn_check_pending(hdev);
2411 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2413 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2416 if (ev->status == 0)
2417 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2418 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2420 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2426 if (!hci_outgoing_auth_needed(hdev, conn))
2429 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2430 struct hci_cp_auth_requested cp;
2432 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2434 cp.handle = __cpu_to_le16(conn->handle);
2435 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2439 hci_dev_unlock(hdev);
/* Encryption Change event handler.
 * On success, marks the connection authenticated and encrypted, raises
 * the security level, and records FIPS/AES-CCM properties derived from
 * the key type and link type; on failure clears the encryption flags,
 * expires the local RPA for LE links, and disconnects live connections.
 * For connections finishing setup (BT_CONFIG), Secure Connections Only
 * mode additionally requires an AES-CCM/P-256 combination before the
 * connection is allowed to complete.
 */
2442 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2444 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2445 struct hci_conn *conn;
2447 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2451 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2457 /* Encryption implies authentication */
2458 set_bit(HCI_CONN_AUTH, &conn->flags);
2459 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2460 conn->sec_level = conn->pending_sec_level;
2462 /* P-256 authentication key implies FIPS */
2463 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2464 set_bit(HCI_CONN_FIPS, &conn->flags);
2466 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2467 conn->type == LE_LINK)
2468 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2470 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2471 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2475 /* We should disregard the current RPA and generate a new one
2476 * whenever the encryption procedure fails.
2478 if (ev->status && conn->type == LE_LINK)
2479 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2481 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2483 if (ev->status && conn->state == BT_CONNECTED) {
2484 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2485 hci_conn_drop(conn);
2489 if (conn->state == BT_CONFIG) {
2491 conn->state = BT_CONNECTED;
2493 /* In Secure Connections Only mode, do not allow any
2494 * connections that are not encrypted with AES-CCM
2495 * using a P-256 authenticated combination key.
2497 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
2498 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2499 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2500 hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2501 hci_conn_drop(conn);
2505 hci_proto_connect_cfm(conn, ev->status);
2506 hci_conn_drop(conn);
2508 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2511 hci_dev_unlock(hdev);
/* Change Connection Link Key Complete event handler.
 * Marks the link secure, clears the pending-auth flag and notifies
 * waiters of the key-change result.
 */
2514 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2515 struct sk_buff *skb)
2517 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2518 struct hci_conn *conn;
2520 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2524 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2527 set_bit(HCI_CONN_SECURE, &conn->flags);
2529 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2531 hci_key_change_cfm(conn, ev->status);
2534 hci_dev_unlock(hdev);
/* Read Remote Supported Features Complete event handler.
 * Stores page 0 of the remote feature mask and, while the connection is
 * still in BT_CONFIG, continues setup: reads extended features when both
 * sides support SSP, otherwise requests the remote name (or reports the
 * device as connected to mgmt) and, when no further authentication is
 * required, completes the connection.
 */
2537 static void hci_remote_features_evt(struct hci_dev *hdev,
2538 struct sk_buff *skb)
2540 struct hci_ev_remote_features *ev = (void *) skb->data;
2541 struct hci_conn *conn;
2543 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2547 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2552 memcpy(conn->features[0], ev->features, 8);
2554 if (conn->state != BT_CONFIG)
2557 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2558 struct hci_cp_read_remote_ext_features cp;
2559 cp.handle = ev->handle;
2561 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2566 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2567 struct hci_cp_remote_name_req cp;
2568 memset(&cp, 0, sizeof(cp));
2569 bacpy(&cp.bdaddr, &conn->dst);
2570 cp.pscan_rep_mode = 0x02;
2571 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2572 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2573 mgmt_device_connected(hdev, conn, 0, NULL, 0);
2575 if (!hci_outgoing_auth_needed(hdev, conn)) {
2576 conn->state = BT_CONNECTED;
2577 hci_proto_connect_cfm(conn, ev->status);
2578 hci_conn_drop(conn);
2582 hci_dev_unlock(hdev);
/* Command Complete event handler.
 * Pulls the event header, dispatches the opcode-specific completion
 * handler (hci_cc_*) over the remaining skb, cancels the command
 * timeout, completes any pending HCI request, and re-arms the command
 * queue when the controller signals it can accept more commands
 * (ev->ncmd) and the adapter is not resetting.
 */
2585 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2587 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2588 u8 status = skb->data[sizeof(*ev)];
2591 skb_pull(skb, sizeof(*ev));
2593 opcode = __le16_to_cpu(ev->opcode);
2596 case HCI_OP_INQUIRY_CANCEL:
2597 hci_cc_inquiry_cancel(hdev, skb);
2600 case HCI_OP_PERIODIC_INQ:
2601 hci_cc_periodic_inq(hdev, skb);
2604 case HCI_OP_EXIT_PERIODIC_INQ:
2605 hci_cc_exit_periodic_inq(hdev, skb);
2608 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2609 hci_cc_remote_name_req_cancel(hdev, skb);
2612 case HCI_OP_ROLE_DISCOVERY:
2613 hci_cc_role_discovery(hdev, skb);
2616 case HCI_OP_READ_LINK_POLICY:
2617 hci_cc_read_link_policy(hdev, skb);
2620 case HCI_OP_WRITE_LINK_POLICY:
2621 hci_cc_write_link_policy(hdev, skb);
2624 case HCI_OP_READ_DEF_LINK_POLICY:
2625 hci_cc_read_def_link_policy(hdev, skb);
2628 case HCI_OP_WRITE_DEF_LINK_POLICY:
2629 hci_cc_write_def_link_policy(hdev, skb);
2633 hci_cc_reset(hdev, skb);
2636 case HCI_OP_WRITE_LOCAL_NAME:
2637 hci_cc_write_local_name(hdev, skb);
2640 case HCI_OP_READ_LOCAL_NAME:
2641 hci_cc_read_local_name(hdev, skb);
2644 case HCI_OP_WRITE_AUTH_ENABLE:
2645 hci_cc_write_auth_enable(hdev, skb);
2648 case HCI_OP_WRITE_ENCRYPT_MODE:
2649 hci_cc_write_encrypt_mode(hdev, skb);
2652 case HCI_OP_WRITE_SCAN_ENABLE:
2653 hci_cc_write_scan_enable(hdev, skb);
2656 case HCI_OP_READ_CLASS_OF_DEV:
2657 hci_cc_read_class_of_dev(hdev, skb);
2660 case HCI_OP_WRITE_CLASS_OF_DEV:
2661 hci_cc_write_class_of_dev(hdev, skb);
2664 case HCI_OP_READ_VOICE_SETTING:
2665 hci_cc_read_voice_setting(hdev, skb);
2668 case HCI_OP_WRITE_VOICE_SETTING:
2669 hci_cc_write_voice_setting(hdev, skb);
2672 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2673 hci_cc_read_num_supported_iac(hdev, skb);
2676 case HCI_OP_WRITE_SSP_MODE:
2677 hci_cc_write_ssp_mode(hdev, skb);
2680 case HCI_OP_WRITE_SC_SUPPORT:
2681 hci_cc_write_sc_support(hdev, skb);
2684 case HCI_OP_READ_LOCAL_VERSION:
2685 hci_cc_read_local_version(hdev, skb);
2688 case HCI_OP_READ_LOCAL_COMMANDS:
2689 hci_cc_read_local_commands(hdev, skb);
2692 case HCI_OP_READ_LOCAL_FEATURES:
2693 hci_cc_read_local_features(hdev, skb);
2696 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2697 hci_cc_read_local_ext_features(hdev, skb);
2700 case HCI_OP_READ_BUFFER_SIZE:
2701 hci_cc_read_buffer_size(hdev, skb);
2704 case HCI_OP_READ_BD_ADDR:
2705 hci_cc_read_bd_addr(hdev, skb);
2708 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2709 hci_cc_read_page_scan_activity(hdev, skb);
2712 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2713 hci_cc_write_page_scan_activity(hdev, skb);
2716 case HCI_OP_READ_PAGE_SCAN_TYPE:
2717 hci_cc_read_page_scan_type(hdev, skb);
2720 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2721 hci_cc_write_page_scan_type(hdev, skb);
2724 case HCI_OP_READ_DATA_BLOCK_SIZE:
2725 hci_cc_read_data_block_size(hdev, skb);
2728 case HCI_OP_READ_FLOW_CONTROL_MODE:
2729 hci_cc_read_flow_control_mode(hdev, skb);
2732 case HCI_OP_READ_LOCAL_AMP_INFO:
2733 hci_cc_read_local_amp_info(hdev, skb);
2736 case HCI_OP_READ_CLOCK:
2737 hci_cc_read_clock(hdev, skb);
2740 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2741 hci_cc_read_local_amp_assoc(hdev, skb);
2744 case HCI_OP_READ_INQ_RSP_TX_POWER:
2745 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2748 case HCI_OP_PIN_CODE_REPLY:
2749 hci_cc_pin_code_reply(hdev, skb);
2752 case HCI_OP_PIN_CODE_NEG_REPLY:
2753 hci_cc_pin_code_neg_reply(hdev, skb);
2756 case HCI_OP_READ_LOCAL_OOB_DATA:
2757 hci_cc_read_local_oob_data(hdev, skb);
2760 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2761 hci_cc_read_local_oob_ext_data(hdev, skb);
2764 case HCI_OP_LE_READ_BUFFER_SIZE:
2765 hci_cc_le_read_buffer_size(hdev, skb);
2768 case HCI_OP_LE_READ_LOCAL_FEATURES:
2769 hci_cc_le_read_local_features(hdev, skb);
2772 case HCI_OP_LE_READ_ADV_TX_POWER:
2773 hci_cc_le_read_adv_tx_power(hdev, skb);
2776 case HCI_OP_USER_CONFIRM_REPLY:
2777 hci_cc_user_confirm_reply(hdev, skb);
2780 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2781 hci_cc_user_confirm_neg_reply(hdev, skb);
2784 case HCI_OP_USER_PASSKEY_REPLY:
2785 hci_cc_user_passkey_reply(hdev, skb);
2788 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2789 hci_cc_user_passkey_neg_reply(hdev, skb);
2792 case HCI_OP_LE_SET_RANDOM_ADDR:
2793 hci_cc_le_set_random_addr(hdev, skb);
2796 case HCI_OP_LE_SET_ADV_ENABLE:
2797 hci_cc_le_set_adv_enable(hdev, skb);
2800 case HCI_OP_LE_SET_SCAN_PARAM:
2801 hci_cc_le_set_scan_param(hdev, skb);
2804 case HCI_OP_LE_SET_SCAN_ENABLE:
2805 hci_cc_le_set_scan_enable(hdev, skb);
2808 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2809 hci_cc_le_read_white_list_size(hdev, skb);
2812 case HCI_OP_LE_CLEAR_WHITE_LIST:
2813 hci_cc_le_clear_white_list(hdev, skb);
2816 case HCI_OP_LE_ADD_TO_WHITE_LIST:
2817 hci_cc_le_add_to_white_list(hdev, skb);
2820 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2821 hci_cc_le_del_from_white_list(hdev, skb);
2824 case HCI_OP_LE_READ_SUPPORTED_STATES:
2825 hci_cc_le_read_supported_states(hdev, skb);
2828 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2829 hci_cc_write_le_host_supported(hdev, skb);
2832 case HCI_OP_LE_SET_ADV_PARAM:
2833 hci_cc_set_adv_param(hdev, skb);
2836 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2837 hci_cc_write_remote_amp_assoc(hdev, skb);
2840 case HCI_OP_READ_RSSI:
2841 hci_cc_read_rssi(hdev, skb);
2844 case HCI_OP_READ_TX_POWER:
2845 hci_cc_read_tx_power(hdev, skb);
2849 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2853 if (opcode != HCI_OP_NOP)
2854 cancel_delayed_work(&hdev->cmd_timer);
2856 hci_req_cmd_complete(hdev, opcode, status);
2858 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2859 atomic_set(&hdev->cmd_cnt, 1);
2860 if (!skb_queue_empty(&hdev->cmd_q))
2861 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Command Status event handler.
 * Dispatches opcode-specific status handlers (hci_cs_*), cancels the
 * command timeout, completes the pending request for commands that do
 * not wait for a dedicated completion event, and re-arms the command
 * queue when the controller can accept more commands (ev->ncmd) and the
 * adapter is not resetting.
 */
2865 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2867 struct hci_ev_cmd_status *ev = (void *) skb->data;
2870 skb_pull(skb, sizeof(*ev));
2872 opcode = __le16_to_cpu(ev->opcode);
2875 case HCI_OP_INQUIRY:
2876 hci_cs_inquiry(hdev, ev->status);
2879 case HCI_OP_CREATE_CONN:
2880 hci_cs_create_conn(hdev, ev->status);
2883 case HCI_OP_DISCONNECT:
2884 hci_cs_disconnect(hdev, ev->status);
2887 case HCI_OP_ADD_SCO:
2888 hci_cs_add_sco(hdev, ev->status);
2891 case HCI_OP_AUTH_REQUESTED:
2892 hci_cs_auth_requested(hdev, ev->status);
2895 case HCI_OP_SET_CONN_ENCRYPT:
2896 hci_cs_set_conn_encrypt(hdev, ev->status);
2899 case HCI_OP_REMOTE_NAME_REQ:
2900 hci_cs_remote_name_req(hdev, ev->status);
2903 case HCI_OP_READ_REMOTE_FEATURES:
2904 hci_cs_read_remote_features(hdev, ev->status);
2907 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2908 hci_cs_read_remote_ext_features(hdev, ev->status);
2911 case HCI_OP_SETUP_SYNC_CONN:
2912 hci_cs_setup_sync_conn(hdev, ev->status);
2915 case HCI_OP_CREATE_PHY_LINK:
2916 hci_cs_create_phylink(hdev, ev->status);
2919 case HCI_OP_ACCEPT_PHY_LINK:
2920 hci_cs_accept_phylink(hdev, ev->status);
2923 case HCI_OP_SNIFF_MODE:
2924 hci_cs_sniff_mode(hdev, ev->status);
2927 case HCI_OP_EXIT_SNIFF_MODE:
2928 hci_cs_exit_sniff_mode(hdev, ev->status);
2931 case HCI_OP_SWITCH_ROLE:
2932 hci_cs_switch_role(hdev, ev->status);
2935 case HCI_OP_LE_CREATE_CONN:
2936 hci_cs_le_create_conn(hdev, ev->status);
2939 case HCI_OP_LE_START_ENC:
2940 hci_cs_le_start_enc(hdev, ev->status);
2944 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2948 if (opcode != HCI_OP_NOP)
2949 cancel_delayed_work(&hdev->cmd_timer);
2952 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2953 hci_req_cmd_complete(hdev, opcode, ev->status);
2955 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2956 atomic_set(&hdev->cmd_cnt, 1);
2957 if (!skb_queue_empty(&hdev->cmd_q))
2958 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Hardware Error event handler: log the controller's error code. */
2962 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
2964 struct hci_ev_hardware_error *ev = (void *) skb->data;
2966 BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code);
/* Role Change event handler.
 * Records the new master/slave role on success, clears the pending
 * role-switch flag and notifies the protocol layer of the outcome.
 */
2969 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2971 struct hci_ev_role_change *ev = (void *) skb->data;
2972 struct hci_conn *conn;
2974 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2978 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2981 conn->role = ev->role;
2983 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2985 hci_role_switch_cfm(conn, ev->status, ev->role);
2988 hci_dev_unlock(hdev);
/* Number Of Completed Packets event handler (packet-based flow control).
 * Validates the event length against num_hndl, then for each handle
 * credits the completed-packet count back to the per-connection 'sent'
 * counter and the per-type controller quota (ACL, LE — falling back to
 * the ACL pool when no dedicated LE buffers exist — or SCO/eSCO),
 * clamping each quota at its configured maximum, and finally kicks the
 * TX work so queued traffic can flow again.
 */
2991 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2993 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2996 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2997 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3001 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3002 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3003 BT_DBG("%s bad parameters", hdev->name);
3007 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3009 for (i = 0; i < ev->num_hndl; i++) {
3010 struct hci_comp_pkts_info *info = &ev->handles[i];
3011 struct hci_conn *conn;
3012 __u16 handle, count;
3014 handle = __le16_to_cpu(info->handle);
3015 count = __le16_to_cpu(info->count);
3017 conn = hci_conn_hash_lookup_handle(hdev, handle);
3021 conn->sent -= count;
3023 switch (conn->type) {
3025 hdev->acl_cnt += count;
3026 if (hdev->acl_cnt > hdev->acl_pkts)
3027 hdev->acl_cnt = hdev->acl_pkts;
3031 if (hdev->le_pkts) {
3032 hdev->le_cnt += count;
3033 if (hdev->le_cnt > hdev->le_pkts)
3034 hdev->le_cnt = hdev->le_pkts;
3036 hdev->acl_cnt += count;
3037 if (hdev->acl_cnt > hdev->acl_pkts)
3038 hdev->acl_cnt = hdev->acl_pkts;
3043 hdev->sco_cnt += count;
3044 if (hdev->sco_cnt > hdev->sco_pkts)
3045 hdev->sco_cnt = hdev->sco_pkts;
3049 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3054 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a connection handle according to the controller type: on a
 * BR/EDR controller the handle names an hci_conn directly; on an AMP
 * controller it names an hci_chan, whose owning connection is the
 * result (the return path for the AMP case is not fully visible in
 * this excerpt). Unknown device types are logged as errors.
 */
3057 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3060 struct hci_chan *chan;
3062 switch (hdev->dev_type) {
3064 return hci_conn_hash_lookup_handle(hdev, handle);
3066 chan = hci_chan_lookup_handle(hdev, handle);
3071 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
/* Number of Completed Data Blocks event: block-based flow control
 * counterpart of hci_num_comp_pkts_evt(). Returns transmit-block
 * credits per handle, clamps the global block counter to num_blocks
 * and re-queues tx_work.
 */
3078 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3080 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
/* This event is only valid in block-based flow control mode */
3083 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3084 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate that the claimed handle count fits in the received data */
3088 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3089 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3090 BT_DBG("%s bad parameters", hdev->name);
3094 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3097 for (i = 0; i < ev->num_hndl; i++) {
3098 struct hci_comp_blocks_info *info = &ev->handles[i];
3099 struct hci_conn *conn = NULL;
3100 __u16 handle, block_count;
3102 handle = __le16_to_cpu(info->handle);
3103 block_count = __le16_to_cpu(info->blocks);
/* Handle may belong to an hci_conn or (on AMP) an hci_chan */
3105 conn = __hci_conn_lookup_handle(hdev, handle);
3109 conn->sent -= block_count;
3111 switch (conn->type) {
3114 hdev->block_cnt += block_count;
3115 if (hdev->block_cnt > hdev->num_blocks)
3116 hdev->block_cnt = hdev->num_blocks;
3120 BT_ERR("Unknown type %d conn %p", conn->type, conn);
/* Re-schedule transmission now that blocks are available */
3125 queue_work(hdev->workqueue, &hdev->tx_work);
/* Mode Change event: a connection entered or left a power-save mode
 * (e.g. sniff). Records the new mode; if the change was not locally
 * requested (no MODE_CHANGE_PEND), mirrors it in the POWER_SAVE flag.
 * A deferred SCO setup waiting on the mode change is kicked off here.
 */
3128 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3130 struct hci_ev_mode_change *ev = (void *) skb->data;
3131 struct hci_conn *conn;
3133 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3137 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3139 conn->mode = ev->mode;
3141 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3143 if (conn->mode == HCI_CM_ACTIVE)
3144 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3146 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
/* Continue a SCO setup that was waiting for the mode change */
3149 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3150 hci_sco_setup(conn, ev->status);
3153 hci_dev_unlock(hdev);
/* PIN Code Request event (legacy pairing). If we are not bondable and
 * did not initiate authentication, the request is rejected with a
 * negative reply. Otherwise, with the management interface active, the
 * request is forwarded to user space; 'secure' asks for a 16-digit PIN
 * when high security is pending.
 */
3156 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3158 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3159 struct hci_conn *conn;
3161 BT_DBG("%s", hdev->name);
3165 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Pairing on an established link: extend the disconnect timeout so
 * the link survives the (possibly slow) user PIN entry.
 */
3169 if (conn->state == BT_CONNECTED) {
3170 hci_conn_hold(conn);
3171 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3172 hci_conn_drop(conn);
3175 if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
3176 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3177 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3178 sizeof(ev->bdaddr), &ev->bdaddr);
3179 } else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
3182 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3187 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3191 hci_dev_unlock(hdev);
/* Record a link key's type and PIN length on the connection and derive
 * the pending security level from the key type: authenticated P-256
 * keys give FIPS, authenticated P-192 gives HIGH, unauthenticated keys
 * give MEDIUM, and legacy combination keys depend on PIN length
 * (the length test itself is on an elided line here).
 */
3194 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
/* A changed-combination key keeps the connection's existing type */
3196 if (key_type == HCI_LK_CHANGED_COMBINATION)
3199 conn->pin_length = pin_len;
3200 conn->key_type = key_type;
3203 case HCI_LK_LOCAL_UNIT:
3204 case HCI_LK_REMOTE_UNIT:
3205 case HCI_LK_DEBUG_COMBINATION:
3207 case HCI_LK_COMBINATION:
3209 conn->pending_sec_level = BT_SECURITY_HIGH;
3211 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3213 case HCI_LK_UNAUTH_COMBINATION_P192:
3214 case HCI_LK_UNAUTH_COMBINATION_P256:
3215 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3217 case HCI_LK_AUTH_COMBINATION_P192:
3218 conn->pending_sec_level = BT_SECURITY_HIGH;
3220 case HCI_LK_AUTH_COMBINATION_P256:
3221 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Link Key Request event: the controller asks for a stored key for the
 * given peer. Replies with the key unless it is too weak for what the
 * connection requires: unauthenticated keys are refused when MITM
 * protection was requested, and short-PIN combination keys are refused
 * for HIGH/FIPS security. Any refusal or missing key ends in a
 * negative reply.
 */
3226 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3228 struct hci_ev_link_key_req *ev = (void *) skb->data;
3229 struct hci_cp_link_key_reply cp;
3230 struct hci_conn *conn;
3231 struct link_key *key;
3233 BT_DBG("%s", hdev->name);
3235 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3240 key = hci_find_link_key(hdev, &ev->bdaddr);
3242 BT_DBG("%s link key not found for %pMR", hdev->name,
3247 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3250 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* MITM required (auth_type bit 0) but the key is unauthenticated:
 * don't use it; 0xff means the auth requirement is still unknown.
 */
3252 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3253 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3254 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3255 BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* Legacy combination keys from short PINs are not acceptable for
 * high-security connections.
 */
3259 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3260 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3261 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3262 BT_DBG("%s ignoring key unauthenticated for high security",
3267 conn_set_key(conn, key->type, key->pin_len);
3270 bacpy(&cp.bdaddr, &ev->bdaddr);
3271 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3273 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3275 hci_dev_unlock(hdev);
3280 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3281 hci_dev_unlock(hdev);
/* Link Key Notification event: the controller produced a new link key
 * for the connection. The key is stored via hci_add_link_key(), user
 * space is informed through mgmt_new_link_key(), debug keys are purged
 * unless explicitly kept, and the flush-key flag tracks whether the
 * key should persist across disconnect.
 */
3284 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3286 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3287 struct hci_conn *conn;
3288 struct link_key *key;
3292 BT_DBG("%s", hdev->name);
3296 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Keep the connection alive over the key store/notify sequence */
3300 hci_conn_hold(conn);
3301 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3302 hci_conn_drop(conn);
3304 conn_set_key(conn, ev->key_type, conn->pin_length);
3306 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3309 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3310 ev->key_type, pin_len, &persistent);
3314 /* Update connection information since adding the key will have
3315 * fixed up the type in the case of changed combination keys.
3317 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3318 conn_set_key(conn, key->type, key->pin_len);
3320 mgmt_new_link_key(hdev, key, persistent);
3322 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3323 * is set. If it's not set simply remove the key from the kernel
3324 * list (we've still notified user space about it but with
3325 * store_hint being 0).
3327 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3328 !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
3329 list_del_rcu(&key->list);
3330 kfree_rcu(key, rcu);
/* Remember whether this key must be flushed on disconnect */
3335 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3337 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3340 hci_dev_unlock(hdev);
/* Read Clock Offset Complete event: on success, cache the peer's clock
 * offset in the inquiry cache entry so later connection attempts can
 * page the device faster.
 */
3343 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3345 struct hci_ev_clock_offset *ev = (void *) skb->data;
3346 struct hci_conn *conn;
3348 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3352 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3353 if (conn && !ev->status) {
3354 struct inquiry_entry *ie;
3356 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3358 ie->data.clock_offset = ev->clock_offset;
3359 ie->timestamp = jiffies;
3363 hci_dev_unlock(hdev);
/* Connection Packet Type Changed event: on success, record the newly
 * negotiated packet types on the connection.
 */
3366 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3368 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3369 struct hci_conn *conn;
3371 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3375 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3376 if (conn && !ev->status)
3377 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3379 hci_dev_unlock(hdev);
/* Page Scan Repetition Mode Change event: refresh the cached page-scan
 * repetition mode (and timestamp) for the peer in the inquiry cache.
 */
3382 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3384 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3385 struct inquiry_entry *ie;
3387 BT_DBG("%s", hdev->name);
3391 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3393 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3394 ie->timestamp = jiffies;
3397 hci_dev_unlock(hdev);
/* Inquiry Result with RSSI event: parses one of the two on-wire
 * response layouts (with or without a pscan_mode field), distinguished
 * by dividing the payload length by the response count. Each response
 * updates the inquiry cache and is reported to user space with
 * mgmt_device_found(). Periodic inquiry results are ignored.
 */
3400 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3401 struct sk_buff *skb)
3403 struct inquiry_data data;
3404 int num_rsp = *((__u8 *) skb->data);
3406 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3411 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
/* Per-response size decides which of the two formats was sent */
3416 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3417 struct inquiry_info_with_rssi_and_pscan_mode *info;
3418 info = (void *) (skb->data + 1);
3420 for (; num_rsp; num_rsp--, info++) {
3423 bacpy(&data.bdaddr, &info->bdaddr);
3424 data.pscan_rep_mode = info->pscan_rep_mode;
3425 data.pscan_period_mode = info->pscan_period_mode;
3426 data.pscan_mode = info->pscan_mode;
3427 memcpy(data.dev_class, info->dev_class, 3);
3428 data.clock_offset = info->clock_offset;
3429 data.rssi = info->rssi;
3430 data.ssp_mode = 0x00;
3432 flags = hci_inquiry_cache_update(hdev, &data, false);
3434 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3435 info->dev_class, info->rssi,
3436 flags, NULL, 0, NULL, 0);
3439 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3441 for (; num_rsp; num_rsp--, info++) {
3444 bacpy(&data.bdaddr, &info->bdaddr);
3445 data.pscan_rep_mode = info->pscan_rep_mode;
3446 data.pscan_period_mode = info->pscan_period_mode;
3447 data.pscan_mode = 0x00;
3448 memcpy(data.dev_class, info->dev_class, 3);
3449 data.clock_offset = info->clock_offset;
3450 data.rssi = info->rssi;
3451 data.ssp_mode = 0x00;
3453 flags = hci_inquiry_cache_update(hdev, &data, false);
3455 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3456 info->dev_class, info->rssi,
3457 flags, NULL, 0, NULL, 0);
3461 hci_dev_unlock(hdev);
/* Remote Extended Features Complete event: stores the requested feature
 * page on the connection. Page 1 carries the remote *host* feature bits,
 * from which the SSP and Secure Connections flags are derived. If the
 * connection is still in BT_CONFIG, setup continues: either with a
 * remote name request (for mgmt reporting) or straight to connected.
 */
3464 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3465 struct sk_buff *skb)
3467 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3468 struct hci_conn *conn;
3470 BT_DBG("%s", hdev->name);
3474 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3478 if (ev->page < HCI_MAX_PAGES)
3479 memcpy(conn->features[ev->page], ev->features, 8);
3481 if (!ev->status && ev->page == 0x01) {
3482 struct inquiry_entry *ie;
3484 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3486 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3488 if (ev->features[0] & LMP_HOST_SSP) {
3489 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3491 /* It is mandatory by the Bluetooth specification that
3492 * Extended Inquiry Results are only used when Secure
3493 * Simple Pairing is enabled, but some devices violate
3496 * To make these devices work, the internal SSP
3497 * enabled flag needs to be cleared if the remote host
3498 * features do not indicate SSP support */
3499 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3502 if (ev->features[0] & LMP_HOST_SC)
3503 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
/* Setup continuation below only applies while still configuring */
3506 if (conn->state != BT_CONFIG)
3509 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3510 struct hci_cp_remote_name_req cp;
3511 memset(&cp, 0, sizeof(cp));
3512 bacpy(&cp.bdaddr, &conn->dst);
3513 cp.pscan_rep_mode = 0x02;
3514 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3515 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3516 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3518 if (!hci_outgoing_auth_needed(hdev, conn)) {
3519 conn->state = BT_CONNECTED;
3520 hci_proto_connect_cfm(conn, ev->status);
3521 hci_conn_drop(conn);
3525 hci_dev_unlock(hdev);
/* Synchronous Connection Complete event (SCO/eSCO). If the exact link
 * type is not found, an eSCO result may match a pending SCO attempt
 * (the controller downgraded it). On success the handle is stored and
 * the connection goes to BT_CONNECTED; for a set of known negotiation
 * failures a retry with restricted packet types is attempted via
 * hci_setup_sync(); any other error closes the connection.
 */
3528 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3529 struct sk_buff *skb)
3531 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3532 struct hci_conn *conn;
3534 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3538 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3540 if (ev->link_type == ESCO_LINK)
/* A SCO result can complete a pending eSCO attempt that the
 * controller fell back from; fix up the stored link type.
 */
3543 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3547 conn->type = SCO_LINK;
3550 switch (ev->status) {
3552 conn->handle = __le16_to_cpu(ev->handle);
3553 conn->state = BT_CONNECTED;
3555 hci_conn_add_sysfs(conn);
3558 case 0x10: /* Connection Accept Timeout */
3559 case 0x0d: /* Connection Rejected due to Limited Resources */
3560 case 0x11: /* Unsupported Feature or Parameter Value */
3561 case 0x1c: /* SCO interval rejected */
3562 case 0x1a: /* Unsupported Remote Feature */
3563 case 0x1f: /* Unspecified error */
3564 case 0x20: /* Unsupported LMP Parameter value */
/* Retry the setup with a narrower packet-type selection */
3566 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3567 (hdev->esco_type & EDR_ESCO_MASK);
3568 if (hci_setup_sync(conn, conn->link->handle))
3574 conn->state = BT_CLOSED;
3578 hci_proto_connect_cfm(conn, ev->status);
3583 hci_dev_unlock(hdev);
/* Walk the length-prefixed EIR structures and return how many bytes of
 * the buffer are actually used (each field is 1 length byte plus
 * field_len bytes of payload; the loop's termination on an empty field
 * is on an elided line here).
 */
3586 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3590 while (parsed < eir_len) {
3591 u8 field_len = eir[0];
3596 parsed += field_len + 1;
3597 eir += field_len + 1;
/* Extended Inquiry Result event: like the RSSI variant but each
 * response also carries EIR data. The EIR is scanned for a complete
 * name (when mgmt is active) to decide whether a name lookup is still
 * needed, then the cache is updated and the device reported to user
 * space together with the trimmed EIR payload.
 */
3603 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3604 struct sk_buff *skb)
3606 struct inquiry_data data;
3607 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3608 int num_rsp = *((__u8 *) skb->data);
3611 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3616 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3621 for (; num_rsp; num_rsp--, info++) {
3625 bacpy(&data.bdaddr, &info->bdaddr);
3626 data.pscan_rep_mode = info->pscan_rep_mode;
3627 data.pscan_period_mode = info->pscan_period_mode;
3628 data.pscan_mode = 0x00;
3629 memcpy(data.dev_class, info->dev_class, 3);
3630 data.clock_offset = info->clock_offset;
3631 data.rssi = info->rssi;
/* Extended results imply the remote supports SSP */
3632 data.ssp_mode = 0x01;
3634 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3635 name_known = eir_has_data_type(info->data,
3641 flags = hci_inquiry_cache_update(hdev, &data, name_known);
/* Only forward the used portion of the EIR buffer */
3643 eir_len = eir_get_length(info->data, sizeof(info->data));
3645 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3646 info->dev_class, info->rssi,
3647 flags, info->data, eir_len, NULL, 0);
3650 hci_dev_unlock(hdev);
/* Encryption Key Refresh Complete event. Only LE links are handled
 * here (BR/EDR finishes via the auth-complete path). On success the
 * pending security level becomes effective; a failure on an
 * established link triggers a disconnect with an auth-failure reason.
 * Links still in BT_CONFIG are promoted to BT_CONNECTED.
 */
3653 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3654 struct sk_buff *skb)
3656 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3657 struct hci_conn *conn;
3659 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3660 __le16_to_cpu(ev->handle));
3664 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3668 /* For BR/EDR the necessary steps are taken through the
3669 * auth_complete event.
3671 if (conn->type != LE_LINK)
3675 conn->sec_level = conn->pending_sec_level;
3677 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3679 if (ev->status && conn->state == BT_CONNECTED) {
3680 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3681 hci_conn_drop(conn);
3685 if (conn->state == BT_CONFIG) {
3687 conn->state = BT_CONNECTED;
3689 hci_proto_connect_cfm(conn, ev->status);
3690 hci_conn_drop(conn);
3692 hci_auth_cfm(conn, ev->status);
3694 hci_conn_hold(conn);
3695 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3696 hci_conn_drop(conn);
3700 hci_dev_unlock(hdev);
/* Compute the authentication requirements to present in an IO
 * Capability Reply, combining the remote's stated requirements with
 * our own MITM bit (bit 0 of auth_type) and both sides' IO
 * capabilities.
 */
3703 static u8 hci_get_auth_req(struct hci_conn *conn)
3705 /* If remote requests no-bonding follow that lead */
3706 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3707 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3708 return conn->remote_auth | (conn->auth_type & 0x01);
3710 /* If both remote and local have enough IO capabilities, require
3713 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3714 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3715 return conn->remote_auth | 0x01;
3717 /* No MITM protection possible so ignore remote requirement */
3718 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* IO Capability Request event (Secure Simple Pairing). If pairing is
 * allowed (we are bondable, we initiated, or the remote only wants
 * no-bonding) an IO Capability Reply is built from the connection's
 * capabilities and derived authentication requirements; otherwise a
 * negative reply with "pairing not allowed" is sent.
 */
3721 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3723 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3724 struct hci_conn *conn;
3726 BT_DBG("%s", hdev->name);
3730 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3734 hci_conn_hold(conn);
3736 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3739 /* Allow pairing if we're pairable, the initiators of the
3740 * pairing or if the remote is not requesting bonding.
3742 if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
3743 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
3744 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3745 struct hci_cp_io_capability_reply cp;
3747 bacpy(&cp.bdaddr, &ev->bdaddr);
3748 /* Change the IO capability from KeyboardDisplay
3749 * to DisplayYesNo as it is not supported by BT spec. */
3750 cp.capability = (conn->io_capability == 0x04) ?
3751 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3753 /* If we are initiators, there is no remote information yet */
3754 if (conn->remote_auth == 0xff) {
3755 /* Request MITM protection if our IO caps allow it
3756 * except for the no-bonding case.
3758 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3759 conn->auth_type != HCI_AT_NO_BONDING)
3760 conn->auth_type |= 0x01;
3762 conn->auth_type = hci_get_auth_req(conn);
3765 /* If we're not bondable, force one of the non-bondable
3766 * authentication requirement values.
3768 if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
3769 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
3771 cp.authentication = conn->auth_type;
/* Advertise OOB data presence when we have it and it is usable
 * for this pairing direction.
 */
3773 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3774 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3779 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3782 struct hci_cp_io_capability_neg_reply cp;
3784 bacpy(&cp.bdaddr, &ev->bdaddr);
3785 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3787 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3792 hci_dev_unlock(hdev);
/* IO Capability Response event: record the remote side's IO capability
 * and authentication requirements on the connection, and flag the
 * presence of remote OOB data (the oob-data test itself is on an
 * elided line here).
 */
3795 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3797 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3798 struct hci_conn *conn;
3800 BT_DBG("%s", hdev->name);
3804 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3808 conn->remote_cap = ev->capability;
3809 conn->remote_auth = ev->authentication;
3811 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3814 hci_dev_unlock(hdev);
/* User Confirmation Request event (SSP numeric comparison). Rejects
 * the request when we require MITM but the remote cannot provide it;
 * auto-accepts (optionally after a configurable delay) when neither
 * side needs MITM; otherwise forwards the passkey to user space, with
 * confirm_hint=1 when user authorization rather than comparison is
 * wanted.
 */
3817 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3818 struct sk_buff *skb)
3820 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3821 int loc_mitm, rem_mitm, confirm_hint = 0;
3822 struct hci_conn *conn;
3824 BT_DBG("%s", hdev->name);
3828 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3831 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM flag on each side */
3835 loc_mitm = (conn->auth_type & 0x01);
3836 rem_mitm = (conn->remote_auth & 0x01);
3838 /* If we require MITM but the remote device can't provide that
3839 * (it has NoInputNoOutput) then reject the confirmation
3840 * request. We check the security level here since it doesn't
3841 * necessarily match conn->auth_type.
3843 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
3844 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3845 BT_DBG("Rejecting request: remote device can't provide MITM");
3846 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3847 sizeof(ev->bdaddr), &ev->bdaddr);
3851 /* If no side requires MITM protection; auto-accept */
3852 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3853 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3855 /* If we're not the initiators request authorization to
3856 * proceed from user space (mgmt_user_confirm with
3857 * confirm_hint set to 1). The exception is if neither
3858 * side had MITM or if the local IO capability is
3859 * NoInputNoOutput, in which case we do auto-accept
3861 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
3862 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3863 (loc_mitm || rem_mitm)) {
3864 BT_DBG("Confirming auto-accept as acceptor");
3869 BT_DBG("Auto-accept of user confirmation with %ums delay",
3870 hdev->auto_accept_delay);
/* Optionally delay the auto-accept via a delayed work item */
3872 if (hdev->auto_accept_delay > 0) {
3873 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3874 queue_delayed_work(conn->hdev->workqueue,
3875 &conn->auto_accept_work, delay);
3879 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3880 sizeof(ev->bdaddr), &ev->bdaddr);
3885 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
3886 le32_to_cpu(ev->passkey), confirm_hint);
3889 hci_dev_unlock(hdev);
/* User Passkey Request event: simply forwarded to user space through
 * the management interface when it is active.
 */
3892 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3893 struct sk_buff *skb)
3895 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3897 BT_DBG("%s", hdev->name);
3899 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3900 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* User Passkey Notification event: remember the passkey to display and
 * reset the entered-digit counter, then notify user space so it can
 * show the passkey.
 */
3903 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3904 struct sk_buff *skb)
3906 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3907 struct hci_conn *conn;
3909 BT_DBG("%s", hdev->name);
3911 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3915 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3916 conn->passkey_entered = 0;
3918 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3919 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3920 conn->dst_type, conn->passkey_notify,
3921 conn->passkey_entered);
/* Keypress Notification event: track the remote user's passkey-entry
 * progress (digits entered/erased/cleared) and relay the updated count
 * to user space for display.
 */
3924 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3926 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3927 struct hci_conn *conn;
3929 BT_DBG("%s", hdev->name);
3931 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3936 case HCI_KEYPRESS_STARTED:
3937 conn->passkey_entered = 0;
3940 case HCI_KEYPRESS_ENTERED:
3941 conn->passkey_entered++;
3944 case HCI_KEYPRESS_ERASED:
3945 conn->passkey_entered--;
3948 case HCI_KEYPRESS_CLEARED:
3949 conn->passkey_entered = 0;
3952 case HCI_KEYPRESS_COMPLETED:
3956 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3957 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3958 conn->dst_type, conn->passkey_notify,
3959 conn->passkey_entered);
/* Simple Pairing Complete event: reset the remote auth requirement to
 * "unknown" and, if pairing failed and we did not initiate the
 * authentication ourselves (no AUTH_PEND, so no auth-complete event
 * will follow), report the failure to user space.
 */
3962 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3963 struct sk_buff *skb)
3965 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3966 struct hci_conn *conn;
3968 BT_DBG("%s", hdev->name);
3972 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3976 /* Reset the authentication requirement to unknown */
3977 conn->remote_auth = 0xff;
3979 /* To avoid duplicate auth_failed events to user space we check
3980 * the HCI_CONN_AUTH_PEND flag which will be set if we
3981 * initiated the authentication. A traditional auth_complete
3982 * event gets always produced as initiator and is also mapped to
3983 * the mgmt_auth_failed event */
3984 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3985 mgmt_auth_failed(conn, ev->status);
/* Release the reference taken when pairing started */
3987 hci_conn_drop(conn);
3990 hci_dev_unlock(hdev);
/* Remote Host Supported Features Notification event: cache the remote
 * host feature page (page 1) on any existing connection and record the
 * peer's SSP support bit in the inquiry cache.
 */
3993 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3994 struct sk_buff *skb)
3996 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3997 struct inquiry_entry *ie;
3998 struct hci_conn *conn;
4000 BT_DBG("%s", hdev->name);
4004 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4006 memcpy(conn->features[1], ev->features, 8);
4008 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4010 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4012 hci_dev_unlock(hdev);
/* Remote OOB Data Request event: answer with stored out-of-band
 * pairing data for the peer. When BR/EDR Secure Connections is enabled
 * the extended reply (P-192 and P-256 hash/randomizer pairs) is used,
 * otherwise the legacy P-192-only reply; without stored data a
 * negative reply is sent.
 */
4015 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4016 struct sk_buff *skb)
4018 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4019 struct oob_data *data;
4021 BT_DBG("%s", hdev->name);
4025 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4028 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
4030 if (bredr_sc_enabled(hdev)) {
4031 struct hci_cp_remote_oob_ext_data_reply cp;
4033 bacpy(&cp.bdaddr, &ev->bdaddr);
4034 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4035 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4036 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4037 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4039 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4042 struct hci_cp_remote_oob_data_reply cp;
4044 bacpy(&cp.bdaddr, &ev->bdaddr);
4045 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4046 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4048 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4052 struct hci_cp_remote_oob_data_neg_reply cp;
4054 bacpy(&cp.bdaddr, &ev->bdaddr);
4055 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4060 hci_dev_unlock(hdev);
/* Physical Link Complete event (AMP). On success, mark the AMP
 * connection connected, copy the peer address from the controlling
 * BR/EDR connection of the AMP manager, register it in sysfs and
 * confirm the physical link to the AMP layer.
 */
4063 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4064 struct sk_buff *skb)
4066 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4067 struct hci_conn *hcon, *bredr_hcon;
4069 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4074 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4076 hci_dev_unlock(hdev);
4082 hci_dev_unlock(hdev);
/* The AMP link is controlled by a BR/EDR connection via amp_mgr */
4086 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4088 hcon->state = BT_CONNECTED;
4089 bacpy(&hcon->dst, &bredr_hcon->dst);
4091 hci_conn_hold(hcon);
4092 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4093 hci_conn_drop(hcon);
4095 hci_conn_add_sysfs(hcon);
4097 amp_physical_cfm(bredr_hcon, hcon);
4099 hci_dev_unlock(hdev);
/* Logical Link Complete event (AMP): create an hci_chan for the new
 * logical link on the physical connection, and if the AMP manager has
 * a BR/EDR L2CAP channel waiting, confirm the logical link to L2CAP
 * (adopting the controller's block MTU).
 */
4102 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4104 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4105 struct hci_conn *hcon;
4106 struct hci_chan *hchan;
4107 struct amp_mgr *mgr;
4109 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4110 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4113 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4117 /* Create AMP hchan */
4118 hchan = hci_chan_create(hcon);
4122 hchan->handle = le16_to_cpu(ev->handle);
4124 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4126 mgr = hcon->amp_mgr;
4127 if (mgr && mgr->bredr_chan) {
4128 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4130 l2cap_chan_lock(bredr_chan);
4132 bredr_chan->conn->mtu = hdev->block_mtu;
4133 l2cap_logical_cfm(bredr_chan, hchan, 0);
4134 hci_conn_hold(hcon);
4136 l2cap_chan_unlock(bredr_chan);
/* Disconnection Logical Link Complete event (AMP): look up the hchan
 * by its logical handle and tear it down via the AMP layer, passing
 * along the disconnect reason.
 */
4140 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4141 struct sk_buff *skb)
4143 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4144 struct hci_chan *hchan;
4146 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4147 le16_to_cpu(ev->handle), ev->status);
4154 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4158 amp_destroy_logical_link(hchan, ev->reason);
4161 hci_dev_unlock(hdev);
/* Disconnection Physical Link Complete event (AMP): mark the physical
 * connection closed.
 */
4164 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4165 struct sk_buff *skb)
4167 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4168 struct hci_conn *hcon;
4170 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4177 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4179 hcon->state = BT_CLOSED;
4183 hci_dev_unlock(hdev);
/* LE Connection Complete event. Matches (or creates) the hci_conn,
 * reconstructs the initiator/responder addresses for both the master
 * and slave cases, resolves an RPA back to its identity address via
 * the IRK store, drops blacklisted peers, notifies mgmt, stores the
 * negotiated connection parameters, and cleans up the pend_le_conns
 * entry that triggered the connection. Finally the background scan is
 * re-evaluated.
 */
4186 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4188 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4189 struct hci_conn_params *params;
4190 struct hci_conn *conn;
4191 struct smp_irk *irk;
4194 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4198 /* All controllers implicitly stop advertising in the event of a
4199 * connection, so ensure that the state bit is cleared.
4201 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
4203 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
4205 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
4207 BT_ERR("No memory for new connection");
4211 conn->dst_type = ev->bdaddr_type;
4213 /* If we didn't have a hci_conn object previously
4214 * but we're in master role this must be something
4215 * initiated using a white list. Since white list based
4216 * connections are not "first class citizens" we don't
4217 * have full tracking of them. Therefore, we go ahead
4218 * with a "best effort" approach of determining the
4219 * initiator address based on the HCI_PRIVACY flag.
4222 conn->resp_addr_type = ev->bdaddr_type;
4223 bacpy(&conn->resp_addr, &ev->bdaddr);
4224 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
4225 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4226 bacpy(&conn->init_addr, &hdev->rpa);
4228 hci_copy_identity_address(hdev,
4230 &conn->init_addr_type);
/* The connection attempt is no longer subject to its timeout */
4234 cancel_delayed_work(&conn->le_conn_timeout);
4238 /* Set the responder (our side) address type based on
4239 * the advertising address type.
4241 conn->resp_addr_type = hdev->adv_addr_type;
4242 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4243 bacpy(&conn->resp_addr, &hdev->random_addr);
4245 bacpy(&conn->resp_addr, &hdev->bdaddr);
4247 conn->init_addr_type = ev->bdaddr_type;
4248 bacpy(&conn->init_addr, &ev->bdaddr);
4250 /* For incoming connections, set the default minimum
4251 * and maximum connection interval. They will be used
4252 * to check if the parameters are in range and if not
4253 * trigger the connection update procedure.
4255 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4256 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4259 /* Lookup the identity address from the stored connection
4260 * address and address type.
4262 * When establishing connections to an identity address, the
4263 * connection procedure will store the resolvable random
4264 * address first. Now if it can be converted back into the
4265 * identity address, start using the identity address from
4268 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4270 bacpy(&conn->dst, &irk->bdaddr);
4271 conn->dst_type = irk->addr_type;
4275 hci_le_conn_failed(conn, ev->status);
4279 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4280 addr_type = BDADDR_LE_PUBLIC;
4282 addr_type = BDADDR_LE_RANDOM;
4284 /* Drop the connection if the device is blocked */
4285 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4286 hci_conn_drop(conn);
4290 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4291 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4293 conn->sec_level = BT_SECURITY_LOW;
4294 conn->handle = __le16_to_cpu(ev->handle);
4295 conn->state = BT_CONNECTED;
/* Store the parameters the controller actually negotiated */
4297 conn->le_conn_interval = le16_to_cpu(ev->interval);
4298 conn->le_conn_latency = le16_to_cpu(ev->latency);
4299 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4301 hci_conn_add_sysfs(conn);
4303 hci_proto_connect_cfm(conn, ev->status);
/* Drop the pending-connect bookkeeping that triggered this link */
4305 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
4308 list_del_init(&params->action);
4310 hci_conn_drop(params->conn);
4311 hci_conn_put(params->conn);
4312 params->conn = NULL;
4317 hci_update_background_scan(hdev);
4318 hci_dev_unlock(hdev);
/* LE Connection Update Complete event: record the newly negotiated
 * interval, latency and supervision timeout on the connection.
 */
4321 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4322 struct sk_buff *skb)
4324 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4325 struct hci_conn *conn;
4327 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4334 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4336 conn->le_conn_interval = le16_to_cpu(ev->interval);
4337 conn->le_conn_latency = le16_to_cpu(ev->latency);
4338 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4341 hci_dev_unlock(hdev);
4344 /* This function requires the caller holds hdev->lock */
/* Decide, for an incoming advertising report, whether to initiate a
 * connection to the advertiser: only for connectable advertising
 * (ADV_IND / ADV_DIRECT_IND), never for blocked devices, never while a
 * slave-role LE link exists, and only when the device is in the
 * pend_le_conns list with a matching auto-connect policy. Returns the
 * created hci_conn (and stows a reference in params->conn) or NULL-ish
 * on the skip paths (the early-return lines are elided here).
 */
4345 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4347 u8 addr_type, u8 adv_type)
4349 struct hci_conn *conn;
4350 struct hci_conn_params *params;
4352 /* If the event is not connectable don't proceed further */
4353 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4356 /* Ignore if the device is blocked */
4357 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4360 /* Most controller will fail if we try to create new connections
4361 * while we have an existing one in slave role.
4363 if (hdev->conn_hash.le_num_slave > 0)
4366 /* If we're not connectable only connect devices that we have in
4367 * our pend_le_conns list.
4369 params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
4374 switch (params->auto_connect) {
4375 case HCI_AUTO_CONN_DIRECT:
4376 /* Only devices advertising with ADV_DIRECT_IND are
4377 * triggering a connection attempt. This is allowing
4378 * incoming connections from slave devices.
4380 if (adv_type != LE_ADV_DIRECT_IND)
4383 case HCI_AUTO_CONN_ALWAYS:
4384 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
4385 * are triggering a connection attempt. This means
4386 * that incoming connectioms from slave device are
4387 * accepted and also outgoing connections to slave
4388 * devices are established when found.
4395 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4396 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
4397 if (!IS_ERR(conn)) {
4398 /* Store the pointer since we don't really have any
4399 * other owner of the object besides the params that
4400 * triggered it. This way we can abort the connection if
4401 * the parameters get removed and keep the reference
4402 * count consistent once the connection is established.
4404 params->conn = hci_conn_get(conn);
4408 switch (PTR_ERR(conn)) {
4410 /* If hci_connect() returns -EBUSY it means there is already
4411 * an LE connection attempt going on. Since controllers don't
4412 * support more than one connection attempt at the time, we
4413 * don't consider this an error case.
4417 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Core handler for a single LE advertising report: resolve a resolvable
 * private address to its identity address via the stored IRK, trigger a
 * pending auto-connection when requested, and emit mgmt device-found
 * events — merging ADV_IND/ADV_SCAN_IND reports with their subsequent
 * SCAN_RSP where possible.
 */
4424 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4425 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
4427 struct discovery_state *d = &hdev->discovery;
4428 struct smp_irk *irk;
4429 struct hci_conn *conn;
4433 /* Check if we need to convert to identity address */
4434 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4436 bdaddr = &irk->bdaddr;
4437 bdaddr_type = irk->addr_type;
4440 /* Check if we have been requested to connect to this device */
4441 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4442 if (conn && type == LE_ADV_IND) {
4443 /* Store report for later inclusion by
4444 * mgmt_device_connected
4446 memcpy(conn->le_adv_data, data, len);
4447 conn->le_adv_data_len = len;
4450 /* Passive scanning shouldn't trigger any device found events,
4451 * except for devices marked as CONN_REPORT for which we do send
4452 * device found events.
4454 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4455 if (type == LE_ADV_DIRECT_IND)
4458 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4459 bdaddr, bdaddr_type))
4462 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4463 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4466 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4467 rssi, flags, data, len, NULL, 0);
4471 /* When receiving non-connectable or scannable undirected
4472 * advertising reports, this means that the remote device is
4473 * not connectable and then clearly indicate this in the
4474 * device found event.
4476 * When receiving a scan response, then there is no way to
4477 * know if the remote device is connectable or not. However
4478 * since scan responses are merged with a previously seen
4479 * advertising report, the flags field from that report
4482 * In the really unlikely case that a controller gets confused
4483 * and just sends a scan response event, then it is marked as
4484 * not connectable as well.
4486 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4487 type == LE_ADV_SCAN_RSP)
4488 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4492 /* If there's nothing pending either store the data from this
4493 * event or send an immediate device found event if the data
4494 * should not be stored for later.
4496 if (!has_pending_adv_report(hdev)) {
4497 /* If the report will trigger a SCAN_REQ store it for
4500 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4501 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4502 rssi, flags, data, len);
4506 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4507 rssi, flags, data, len, NULL, 0);
4511 /* Check if the pending report is for the same device as the new one */
4512 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4513 bdaddr_type == d->last_adv_addr_type);
4515 /* If the pending data doesn't match this report or this isn't a
4516 * scan response (e.g. we got a duplicate ADV_IND) then force
4517 * sending of the pending data.
4519 if (type != LE_ADV_SCAN_RSP || !match) {
4520 /* Send out whatever is in the cache, but skip duplicates */
4522 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4523 d->last_adv_addr_type, NULL,
4524 d->last_adv_rssi, d->last_adv_flags,
4526 d->last_adv_data_len, NULL, 0);
4528 /* If the new report will trigger a SCAN_REQ store it for
4531 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4532 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4533 rssi, flags, data, len);
4537 /* The advertising reports cannot be merged, so clear
4538 * the pending report and send out a device found event.
4540 clear_pending_adv_report(hdev);
4541 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4542 rssi, flags, data, len, NULL, 0);
4546 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4547 * the new event is a SCAN_RSP. We can therefore proceed with
4548 * sending a merged device found event.
4550 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4551 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4552 d->last_adv_data, d->last_adv_data_len, data, len);
4553 clear_pending_adv_report(hdev);
/* LE Advertising Report meta event: walk the packed array of reports
 * in the event payload and hand each one to process_adv_report().
 */
4556 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4558 u8 num_reports = skb->data[0];
4559 void *ptr = &skb->data[1];
4563 while (num_reports--) {
4564 struct hci_ev_le_advertising_info *ev = ptr;
/* The RSSI byte immediately follows each report's variable-length
 * data field.
 */
4567 rssi = ev->data[ev->length];
4568 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4569 ev->bdaddr_type, rssi, ev->data, ev->length);
/* Advance past the fixed header, the data, and the RSSI byte */
4571 ptr += sizeof(*ev) + ev->length + 1;
4574 hci_dev_unlock(hdev);
/* LE Long Term Key Request meta event: look up a stored LTK matching
 * the connection (and, for legacy keys, the EDiv/Rand pair from the
 * event) and reply with it, or send a negative reply when no suitable
 * key exists.
 */
4577 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4579 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4580 struct hci_cp_le_ltk_reply cp;
4581 struct hci_cp_le_ltk_neg_reply neg;
4582 struct hci_conn *conn;
4583 struct smp_ltk *ltk;
4585 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4589 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4593 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
4597 if (smp_ltk_is_sc(ltk)) {
4598 /* With SC both EDiv and Rand are set to zero */
4599 if (ev->ediv || ev->rand)
4602 /* For non-SC keys check that EDiv and Rand match */
4603 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
4607 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
4608 cp.handle = cpu_to_le16(conn->handle);
4610 conn->pending_sec_level = smp_ltk_sec_level(ltk);
4612 conn->enc_key_size = ltk->enc_size;
4614 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
4616 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
4617 * temporary key used to encrypt a connection following
4618 * pairing. It is used during the Encrypted Session Setup to
4619 * distribute the keys. Later, security can be re-established
4620 * using a distributed LTK.
/* STKs are one-shot: drop them from the key list once used */
4622 if (ltk->type == SMP_STK) {
4623 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4624 list_del_rcu(&ltk->list);
4625 kfree_rcu(ltk, rcu);
4627 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4630 hci_dev_unlock(hdev);
/* Negative-reply path: no connection or no matching key */
4635 neg.handle = ev->handle;
4636 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
4637 hci_dev_unlock(hdev);
/* Reject an LE remote connection parameter request on @handle.
 * NOTE(review): the remainder of the parameter list (the HCI error
 * reason passed by callers) is not visible in this excerpt — confirm.
 */
4640 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4643 struct hci_cp_le_conn_param_req_neg_reply cp;
4645 cp.handle = cpu_to_le16(handle);
4648 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* LE Remote Connection Parameter Request meta event: validate the
 * requested parameters, remember them for master-role connections,
 * notify mgmt of the new parameters, and send a positive reply that
 * echoes the remote's requested values.  Invalid handles or parameter
 * ranges get a negative reply instead.
 */
4652 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4653 struct sk_buff *skb)
4655 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4656 struct hci_cp_le_conn_param_req_reply cp;
4657 struct hci_conn *hcon;
4658 u16 handle, min, max, latency, timeout;
4660 handle = le16_to_cpu(ev->handle);
4661 min = le16_to_cpu(ev->interval_min);
4662 max = le16_to_cpu(ev->interval_max);
4663 latency = le16_to_cpu(ev->latency);
4664 timeout = le16_to_cpu(ev->timeout);
4666 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4667 if (!hcon || hcon->state != BT_CONNECTED)
4668 return send_conn_param_neg_reply(hdev, handle,
4669 HCI_ERROR_UNKNOWN_CONN_ID);
/* Range/consistency check per the spec; non-zero means invalid */
4671 if (hci_check_conn_params(min, max, latency, timeout))
4672 return send_conn_param_neg_reply(hdev, handle,
4673 HCI_ERROR_INVALID_LL_PARAMS);
4675 if (hcon->role == HCI_ROLE_MASTER) {
4676 struct hci_conn_params *params;
/* As master, persist the remote's preferences so future
 * connection updates use them.
 */
4681 params = hci_conn_params_lookup(hdev, &hcon->dst,
4684 params->conn_min_interval = min;
4685 params->conn_max_interval = max;
4686 params->conn_latency = latency;
4687 params->supervision_timeout = timeout;
4693 hci_dev_unlock(hdev);
4695 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4696 store_hint, min, max, latency, timeout);
/* Reply fields stay in wire (little-endian) order, so copy the
 * raw event values straight through.
 */
4699 cp.handle = ev->handle;
4700 cp.interval_min = ev->interval_min;
4701 cp.interval_max = ev->interval_max;
4702 cp.latency = ev->latency;
4703 cp.timeout = ev->timeout;
4707 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* Demultiplex an LE meta event: strip the meta header and dispatch to
 * the subevent-specific handler.
 */
4710 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4712 struct hci_ev_le_meta *le_ev = (void *) skb->data;
/* Leave only the subevent payload in the skb for the handlers */
4714 skb_pull(skb, sizeof(*le_ev));
4716 switch (le_ev->subevent) {
4717 case HCI_EV_LE_CONN_COMPLETE:
4718 hci_le_conn_complete_evt(hdev, skb);
4721 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4722 hci_le_conn_update_complete_evt(hdev, skb);
4725 case HCI_EV_LE_ADVERTISING_REPORT:
4726 hci_le_adv_report_evt(hdev, skb);
4729 case HCI_EV_LE_LTK_REQ:
4730 hci_le_ltk_request_evt(hdev, skb);
4733 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4734 hci_le_remote_conn_param_req_evt(hdev, skb);
/* AMP Channel Selected event: for the connection matching the event's
 * physical link handle, trigger the read of the final local AMP assoc
 * data.
 */
4742 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4744 struct hci_ev_channel_selected *ev = (void *) skb->data;
4745 struct hci_conn *hcon;
4747 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4749 skb_pull(skb, sizeof(*ev));
4751 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4755 amp_read_loc_assoc_final_data(hdev, hcon);
4758 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4760 struct hci_event_hdr *hdr = (void *) skb->data;
4761 __u8 event = hdr->evt;
4765 /* Received events are (currently) only needed when a request is
4766 * ongoing so avoid unnecessary memory allocation.
4768 if (hci_req_pending(hdev)) {
4769 kfree_skb(hdev->recv_evt);
4770 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4773 hci_dev_unlock(hdev);
4775 skb_pull(skb, HCI_EVENT_HDR_SIZE);
4777 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4778 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4779 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4781 hci_req_cmd_complete(hdev, opcode, 0);
4785 case HCI_EV_INQUIRY_COMPLETE:
4786 hci_inquiry_complete_evt(hdev, skb);
4789 case HCI_EV_INQUIRY_RESULT:
4790 hci_inquiry_result_evt(hdev, skb);
4793 case HCI_EV_CONN_COMPLETE:
4794 hci_conn_complete_evt(hdev, skb);
4797 case HCI_EV_CONN_REQUEST:
4798 hci_conn_request_evt(hdev, skb);
4801 case HCI_EV_DISCONN_COMPLETE:
4802 hci_disconn_complete_evt(hdev, skb);
4805 case HCI_EV_AUTH_COMPLETE:
4806 hci_auth_complete_evt(hdev, skb);
4809 case HCI_EV_REMOTE_NAME:
4810 hci_remote_name_evt(hdev, skb);
4813 case HCI_EV_ENCRYPT_CHANGE:
4814 hci_encrypt_change_evt(hdev, skb);
4817 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4818 hci_change_link_key_complete_evt(hdev, skb);
4821 case HCI_EV_REMOTE_FEATURES:
4822 hci_remote_features_evt(hdev, skb);
4825 case HCI_EV_CMD_COMPLETE:
4826 hci_cmd_complete_evt(hdev, skb);
4829 case HCI_EV_CMD_STATUS:
4830 hci_cmd_status_evt(hdev, skb);
4833 case HCI_EV_HARDWARE_ERROR:
4834 hci_hardware_error_evt(hdev, skb);
4837 case HCI_EV_ROLE_CHANGE:
4838 hci_role_change_evt(hdev, skb);
4841 case HCI_EV_NUM_COMP_PKTS:
4842 hci_num_comp_pkts_evt(hdev, skb);
4845 case HCI_EV_MODE_CHANGE:
4846 hci_mode_change_evt(hdev, skb);
4849 case HCI_EV_PIN_CODE_REQ:
4850 hci_pin_code_request_evt(hdev, skb);
4853 case HCI_EV_LINK_KEY_REQ:
4854 hci_link_key_request_evt(hdev, skb);
4857 case HCI_EV_LINK_KEY_NOTIFY:
4858 hci_link_key_notify_evt(hdev, skb);
4861 case HCI_EV_CLOCK_OFFSET:
4862 hci_clock_offset_evt(hdev, skb);
4865 case HCI_EV_PKT_TYPE_CHANGE:
4866 hci_pkt_type_change_evt(hdev, skb);
4869 case HCI_EV_PSCAN_REP_MODE:
4870 hci_pscan_rep_mode_evt(hdev, skb);
4873 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4874 hci_inquiry_result_with_rssi_evt(hdev, skb);
4877 case HCI_EV_REMOTE_EXT_FEATURES:
4878 hci_remote_ext_features_evt(hdev, skb);
4881 case HCI_EV_SYNC_CONN_COMPLETE:
4882 hci_sync_conn_complete_evt(hdev, skb);
4885 case HCI_EV_EXTENDED_INQUIRY_RESULT:
4886 hci_extended_inquiry_result_evt(hdev, skb);
4889 case HCI_EV_KEY_REFRESH_COMPLETE:
4890 hci_key_refresh_complete_evt(hdev, skb);
4893 case HCI_EV_IO_CAPA_REQUEST:
4894 hci_io_capa_request_evt(hdev, skb);
4897 case HCI_EV_IO_CAPA_REPLY:
4898 hci_io_capa_reply_evt(hdev, skb);
4901 case HCI_EV_USER_CONFIRM_REQUEST:
4902 hci_user_confirm_request_evt(hdev, skb);
4905 case HCI_EV_USER_PASSKEY_REQUEST:
4906 hci_user_passkey_request_evt(hdev, skb);
4909 case HCI_EV_USER_PASSKEY_NOTIFY:
4910 hci_user_passkey_notify_evt(hdev, skb);
4913 case HCI_EV_KEYPRESS_NOTIFY:
4914 hci_keypress_notify_evt(hdev, skb);
4917 case HCI_EV_SIMPLE_PAIR_COMPLETE:
4918 hci_simple_pair_complete_evt(hdev, skb);
4921 case HCI_EV_REMOTE_HOST_FEATURES:
4922 hci_remote_host_features_evt(hdev, skb);
4925 case HCI_EV_LE_META:
4926 hci_le_meta_evt(hdev, skb);
4929 case HCI_EV_CHANNEL_SELECTED:
4930 hci_chan_selected_evt(hdev, skb);
4933 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4934 hci_remote_oob_data_request_evt(hdev, skb);
4937 case HCI_EV_PHY_LINK_COMPLETE:
4938 hci_phy_link_complete_evt(hdev, skb);
4941 case HCI_EV_LOGICAL_LINK_COMPLETE:
4942 hci_loglink_complete_evt(hdev, skb);
4945 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4946 hci_disconn_loglink_complete_evt(hdev, skb);
4949 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4950 hci_disconn_phylink_complete_evt(hdev, skb);
4953 case HCI_EV_NUM_COMP_BLOCKS:
4954 hci_num_comp_blocks_evt(hdev, skb);
4958 BT_DBG("%s event 0x%2.2x", hdev->name, event);
4963 hdev->stat.evt_rx++;