/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
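
/* Write Scan Enable helper: the request value is a bitmask, where
 * SCAN_INQUIRY (0x01) and SCAN_PAGE (0x02) may be combined and 0x00
 * disables both scan types.
 */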
static int hci_scan_req(struct hci_request *req, unsigned long opt)
	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

static int hci_auth_req(struct hci_request *req, unsigned long opt)
	BT_DBG("%s %x", req->hdev->name, auth);

	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
	BT_DBG("%s %x", req->hdev->name, encrypt);

	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
	struct hci_dev *hdev = NULL, *d;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);

	read_unlock(&hci_dev_list_lock);
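
	/* Illustrative (not from this file): callers must balance the
	 * reference taken here, e.g.
	 *
	 *	hdev = hci_dev_get(0);
	 *	if (hdev) {
	 *		...
	 *		hci_dev_put(hdev);
	 *	}
	 */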
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:

void hci_discovery_set_state(struct hci_dev *hdev, int state)
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)

	hdev->discovery.state = state;

	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
	case DISCOVERY_STARTING:
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
	case DISCOVERY_RESOLVING:
	case DISCOVERY_STOPPING:

void hci_inquiry_cache_flush(struct hci_dev *hdev)
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
		if (!bacmp(&e->data.bdaddr, bdaddr))

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;
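
	/* Keep the resolve list ordered by signal strength (smallest
	 * |RSSI| first) so that names are resolved for the strongest
	 * devices first.
	 */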
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))

	list_add(&ie->list, pos);

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

	list_add(&ie->all, &cache->all);

		ie->name_state = NAME_KNOWN;
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);

	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;

	BT_DBG("cache %p, copied %d", cache, copied);

static int hci_inq_req(struct hci_request *req, unsigned long opt)
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))

	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

int hci_inquiry(void __user *arg)
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;

	if (copy_from_user(&ir, ptr, sizeof(ir)))

	hdev = hci_dev_get(ir.dev_id);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

	if (hdev->dev_type != HCI_PRIMARY) {

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {

	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);

	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,

	/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
	 * cleared). If it is interrupted by a signal, return -EINTR.
	 */
	if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
			TASK_INTERRUPTIBLE)) {
	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);

	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *

static int hci_dev_do_open(struct hci_dev *hdev)
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	ret = hci_dev_open_sync(hdev);

	hci_req_sync_unlock(hdev);

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
	struct hci_dev *hdev;

	hdev = hci_dev_get(dev);
	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

int hci_dev_do_close(struct hci_dev *hdev)
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_dev_close_sync(hdev);

	hci_req_sync_unlock(hdev);

int hci_dev_close(__u16 dev)
	struct hci_dev *hdev;

	hdev = hci_dev_get(dev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

static int hci_dev_do_reset(struct hci_dev *hdev)
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Cancel these to avoid queueing non-chained pending work */
	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
	/* Wait for
	 *
	 *	if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
	 *		queue_delayed_work(&hdev->{cmd,ncmd}_timer)
	 *
	 * inside RCU section to see the flag or complete scheduling.
	 */
	synchronize_rcu();
	/* Explicitly cancel works in case scheduled after setting the flag. */
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
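
	/* cmd_cnt tracks how many commands the controller can accept;
	 * after the drain, allow a single outstanding command again.
	 */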
	atomic_set(&hdev->cmd_cnt, 1);

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);

int hci_dev_reset(__u16 dev)
	struct hci_dev *hdev;

	hdev = hci_dev_get(dev);

	if (!test_bit(HCI_UP, &hdev->flags)) {

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

	err = hci_dev_do_reset(hdev);

int hci_dev_reset_stat(__u16 dev)
	struct hci_dev *hdev;

	hdev = hci_dev_get(dev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
		conn_changed = hci_dev_test_and_clear_flag(hdev,

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,

	if (!hci_dev_test_flag(hdev, HCI_MGMT))

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);

int hci_dev_cmd(unsigned int cmd, void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_req dr;

	if (copy_from_user(&dr, arg, sizeof(dr)))

	hdev = hci_dev_get(dr.dev_id);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

	if (hdev->dev_type != HCI_PRIMARY) {

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {

		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		if (!lmp_encrypt_capable(hdev)) {

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
			hci_update_passive_scan_state(hdev, dr.dev_opt);

		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);

		if (hdev->pkt_type == (__u16) dr.dev_opt)

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
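
		/* On little-endian systems, dev_opt carries the MTU in its
		 * upper 16 bits and the packet count in its lower 16 bits.
		 */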
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);

		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);

int hci_get_dev_list(void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;

	if (get_user(dev_num, (__u16 __user *) arg))

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

	read_unlock(&hci_dev_list_lock);

	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);

	return err ? -EFAULT : 0;

int hci_get_dev_info(void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_info di;

	if (copy_from_user(&di, arg, sizeof(di)))

	hdev = hci_dev_get(di.dev_id);

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);

	strscpy(di.name, hdev->name, sizeof(di.name));
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;

	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))

/* ---- Interface to HCI drivers ---- */

static int hci_dev_do_poweroff(struct hci_dev *hdev)
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_set_powered_sync(hdev, false);

	hci_req_sync_unlock(hdev);

static int hci_rfkill_set_block(void *data, bool blocked)
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))

	if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))

	hci_dev_set_flag(hdev, HCI_RFKILLED);

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		err = hci_dev_do_poweroff(hdev);
			bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
			/* Make sure the device is still closed even if
			 * something during the power-off sequence (e.g.
			 * disconnecting devices) failed.
			 */
			hci_dev_do_close(hdev);

		hci_dev_clear_flag(hdev, HCI_RFKILLED);

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,

static void hci_power_on(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);

	err = hci_dev_do_open(hdev);

		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);

static void hci_power_off(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev,

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);

static void hci_error_reset(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

		hdev->hw_error(hdev, hdev->hw_error_code);

	bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (!hci_dev_do_close(hdev))
		hci_dev_do_open(hdev);

void hci_uuids_clear(struct hci_dev *hdev)
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);

void hci_link_keys_clear(struct hci_dev *hdev)
	struct link_key *key, *tmp;

	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);

void hci_smp_ltks_clear(struct hci_dev *hdev)
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);

void hci_smp_irks_clear(struct hci_dev *hdev)
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);

void hci_blocked_keys_clear(struct hci_dev *hdev)
	struct blocked_key *b, *tmp;

	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
	bool blocked = false;
	struct blocked_key *b;

	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
	if (key_type < 0x03)

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)

	/* Security mode 3 case */

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
	/* If none of the above criteria match, then don't store the key
	 * persistently.
	 */
static u8 ltk_role(u8 type)
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;
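
	/* Resolution is two-pass: first look for an already-resolved RPA
	 * match, then fall back to cryptographically matching the RPA
	 * against each IRK, caching the RPA on success.
	 */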
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;

	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;

	return irk_to_return;

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;

	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;

	return irk_to_return;

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
	struct link_key *key, *old_key;

	old_key = hci_find_link_key(hdev, bdaddr);
		old_key_type = old_key->type;

		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		list_add_rcu(&key->list, &hdev->link_keys);

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
			conn->key_type = type;

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;

		*persistent = hci_persistent_key(hdev, conn, type,

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);

		key = kzalloc(sizeof(*key), GFP_KERNEL);
		list_add_rcu(&key->list, &hdev->long_term_keys);

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->enc_size = enc_size;

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);

	return removed ? 0 : -ENOENT;

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
	struct smp_irk *irk;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev,

	if (hdev->req_skb) {
		u16 opcode = hci_skb_opcode(hdev->req_skb);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);

		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
		bt_dev_err(hdev, "command tx timeout");

	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev,

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During HCI_INIT phase no events can be injected if the ncmd timer
	 * triggers since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))

	/* This is an irrecoverable state, inject hardware error event */
	hci_reset_dev(hdev);

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
		if (data->bdaddr_type != bdaddr_type)

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);

void hci_remote_oob_data_clear(struct hci_dev *hdev)
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
		data = kmalloc(sizeof(*data), GFP_KERNEL);

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
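
	/* data->present is a bitmask: 0x01 means only the P-192 values are
	 * valid, 0x02 only the P-256 values, and 0x03 both.
	 */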
	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
			data->present = 0x00;

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
	struct adv_info *cur_instance;
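
	/* Instances rotate in a circular fashion: after the last entry in
	 * the list, wrap around to the first one.
	 */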
	cur_instance = hci_find_adv_instance(hdev, instance);

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);

	return list_next_entry(cur_instance, list);

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	BT_DBG("%s removing %d", hdev->name, instance);
	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		hdev->cur_adv_instance = 0x00;

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;

static void adv_instance_rpa_expired(struct work_struct *work)
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	adv_instance->rpa_expired = true;

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u16 adv_data_len, u8 *adv_data,
				      u16 scan_rsp_len, u8 *scan_rsp_data,
				      u16 timeout, u16 duration, s8 tx_power,
				      u32 min_interval, u32 max_interval,
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));

		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
			return ERR_PTR(-EOVERFLOW);

		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
			return ERR_PTR(-ENOMEM);

		adv->pending = true;
		adv->instance = instance;
		list_add(&adv->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;

	adv->min_interval = min_interval;
	adv->max_interval = max_interval;
	adv->tx_power = tx_power;
	/* Defining a mesh_handle changes the timing units to ms,
	 * rather than seconds, and ties the instance to the requested
	 * mesh_tx queue.
	 */
	adv->mesh = mesh_handle;
	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
				  scan_rsp_len, scan_rsp_data);

	adv->timeout = timeout;
	adv->remaining_time = timeout;

		adv->duration = hdev->def_multi_adv_rotation_duration;
		adv->duration = duration;

	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
	BT_DBG("%s for %d", hdev->name, instance);
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u8 data_len, u8 *data,
				      u32 min_interval, u32 max_interval)
	struct adv_info *adv;

	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
				   min_interval, max_interval, 0);

	adv->periodic = true;
	adv->per_adv_data_len = data_len;

		memcpy(adv->per_adv_data, data, data_len);

/* This function requires the caller holds hdev->lock */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);

	/* If advertisement doesn't exist, we can't modify its data */

	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memcpy(adv->adv_data, adv_data, adv_data_len);
		adv->adv_data_len = adv_data_len;
		adv->adv_data_changed = true;

	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
		adv->scan_rsp_len = scan_rsp_len;
		adv->scan_rsp_changed = true;

	/* Mark as changed if there are flags which would affect it */
	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		adv->scan_rsp_changed = true;

/* This function requires the caller holds hdev->lock */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
	struct adv_info *adv;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields.
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

	adv = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */

bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
	struct adv_info *adv;
	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)

	adv = hci_find_adv_instance(hdev, instance);

	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)

	return adv->scan_rsp_len ? true : false;

/* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)
	struct adv_monitor *monitor;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		hci_free_adv_monitor(hdev, monitor);

	idr_destroy(&hdev->adv_monitors_idr);
/* Frees the monitor structure and does some bookkeeping.
 * This function requires the caller holds hdev->lock.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
	struct adv_pattern *pattern;
	struct adv_pattern *tmp;

	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);

	if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);

	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
		hdev->adv_monitors_cnt--;
		mgmt_adv_monitor_removed(hdev, monitor->handle);

/* Assigns handle to a monitor, and if offloading is supported and power is on,
 * also attempts to forward the request to the controller.
 * This function requires the caller holds hci_req_sync_lock.
 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
	int min, max, handle;

	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
		hci_dev_unlock(hdev);

	monitor->handle = handle;

	if (!hdev_is_powered(hdev))

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		bt_dev_dbg(hdev, "add monitor %d status %d",
			   monitor->handle, status);
		/* Message was not forwarded to controller - not an error */

	case HCI_ADV_MONITOR_EXT_MSFT:
		status = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "add monitor %d msft status %d",

/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * This function requires the caller holds hci_req_sync_lock.
 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
				  struct adv_monitor *monitor)
	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		bt_dev_dbg(hdev, "remove monitor %d status %d",
			   monitor->handle, status);

	case HCI_ADV_MONITOR_EXT_MSFT:
		handle = monitor->handle;
		status = msft_remove_monitor(hdev, monitor);
		bt_dev_dbg(hdev, "remove monitor %d msft status %d",

	/* In case no matching handle registered, just free the monitor */
	if (status == -ENOENT)

	if (status == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
	hci_free_adv_monitor(hdev, monitor);

/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);

	return hci_remove_adv_monitor(hdev, monitor);

/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_all_adv_monitor(struct hci_dev *hdev)
	struct adv_monitor *monitor;
	int idr_next_id = 0;

		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);

		status = hci_remove_adv_monitor(hdev, monitor);

/* This function requires the caller holds hdev->lock */
bool hci_is_adv_monitoring(struct hci_dev *hdev)
	return !idr_is_empty(&hdev->adv_monitors_idr);

int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
	if (msft_monitor_supported(hdev))
		return HCI_ADV_MONITOR_EXT_MSFT;

	return HCI_ADV_MONITOR_EXT_NONE;

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)

struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
	struct bdaddr_list_with_irk *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)

struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
				  bdaddr_t *bdaddr, u8 type)
	struct bdaddr_list_with_flags *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
	struct bdaddr_list *b, *n;

	list_for_each_entry_safe(b, n, bdaddr_list, list) {

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))

	if (hci_bdaddr_list_lookup(list, bdaddr, type))

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);
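
	/* Illustrative (not from this file): a typical caller adds an entry
	 * to one of the hdev address lists, e.g.
	 *
	 *	err = hci_bdaddr_list_add(&hdev->accept_list, &bdaddr,
	 *				  BDADDR_BREDR);
	 *
	 * BDADDR_ANY and duplicate entries are rejected above.
	 */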
int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type, u8 *peer_irk, u8 *local_irk)
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))

	if (hci_bdaddr_list_lookup(list, bdaddr, type))

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

		memcpy(entry->peer_irk, peer_irk, 16);

		memcpy(entry->local_irk, local_irk, 16);

	list_add(&entry->list, list);

int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))

	if (hci_bdaddr_list_lookup(list, bdaddr, type))

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;
	entry->flags = flags;

	list_add(&entry->list, list);

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);

	list_del(&entry->list);

int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);

	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);

	list_del(&entry->list);

int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);

	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);

	list_del(&entry->list);

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {

/* This function requires the caller holds hdev->lock or rcu_read_lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
	struct hci_conn_params *param;

	list_for_each_entry_rcu(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type) {

/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_del_init(struct hci_conn_params *param)
	if (list_empty(&param->action))

	list_del_rcu(&param->action);

	INIT_LIST_HEAD(&param->action);

/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_add(struct hci_conn_params *param,
			  struct list_head *list)
	list_add_rcu(&param->action, list);

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
		bt_dev_err(hdev, "out of memory");

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

void hci_conn_params_free(struct hci_conn_params *params)
	hci_pend_le_list_del_init(params);

		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);

	list_del(&params->list);

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);

	hci_conn_params_free(params);

	hci_update_passive_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;

		hci_conn_params_free(params);

	BT_DBG("All LE disabled connection parameters were removed");

/* This function requires the caller holds hdev->lock */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;

static void hci_clear_wake_reason(struct hci_dev *hdev)
	hdev->wake_reason = 0;
	bacpy(&hdev->wake_addr, BDADDR_ANY);
	hdev->wake_addr_type = 0;

	hci_dev_unlock(hdev);

static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
	struct hci_dev *hdev =
		container_of(nb, struct hci_dev, suspend_notifier);

	/* Userspace has full control of this device. Do nothing. */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))

	/* To avoid a potential race with hci_unregister_dev. */

	if (action == PM_SUSPEND_PREPARE)
		ret = hci_suspend_dev(hdev);
	else if (action == PM_POST_SUSPEND)
		ret = hci_resume_dev(hdev);

		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
	struct hci_dev *hdev;
	unsigned int alloc_size;

	alloc_size = sizeof(*hdev);

		/* Fixme: May need ALIGN-ment? */
		alloc_size += sizeof_priv;

	hdev = kzalloc(alloc_size, GFP_KERNEL);

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->advmon_allowlist_duration = 300;
	hdev->advmon_no_filter_duration = 500;
	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_scan_int_suspend = 0x0400;
	hdev->le_scan_window_suspend = 0x0012;
	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
	hdev->le_scan_int_adv_monitor = 0x0060;
	hdev->le_scan_window_adv_monitor = 0x0030;
	hdev->le_scan_int_connect = 0x0060;
	hdev->le_scan_window_connect = 0x0060;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
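
	/* LE units: scan interval/window are in 0.625 ms steps, connection
	 * intervals in 1.25 ms steps and the supervision timeout in 10 ms
	 * steps, so 0x0018/0x0028 are 30/50 ms and 0x002a is 420 ms.
	 */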
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
	hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	/* default 1.28 sec page scan */
	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
	hdev->def_page_scan_int = 0x0800;
	hdev->def_page_scan_window = 0x0012;
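
	/* Page scan units are 0.625 ms: 0x0800 * 0.625 ms = 1.28 s interval,
	 * 0x0012 * 0.625 ms = 11.25 ms window.
	 */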
2568 mutex_init(&hdev->lock);
2569 mutex_init(&hdev->req_lock);
2571 ida_init(&hdev->unset_handle_ida);
2573 INIT_LIST_HEAD(&hdev->mesh_pending);
2574 INIT_LIST_HEAD(&hdev->mgmt_pending);
2575 INIT_LIST_HEAD(&hdev->reject_list);
2576 INIT_LIST_HEAD(&hdev->accept_list);
2577 INIT_LIST_HEAD(&hdev->uuids);
2578 INIT_LIST_HEAD(&hdev->link_keys);
2579 INIT_LIST_HEAD(&hdev->long_term_keys);
2580 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2581 INIT_LIST_HEAD(&hdev->remote_oob_data);
2582 INIT_LIST_HEAD(&hdev->le_accept_list);
2583 INIT_LIST_HEAD(&hdev->le_resolv_list);
2584 INIT_LIST_HEAD(&hdev->le_conn_params);
2585 INIT_LIST_HEAD(&hdev->pend_le_conns);
2586 INIT_LIST_HEAD(&hdev->pend_le_reports);
2587 INIT_LIST_HEAD(&hdev->conn_hash.list);
2588 INIT_LIST_HEAD(&hdev->adv_instances);
2589 INIT_LIST_HEAD(&hdev->blocked_keys);
2590 INIT_LIST_HEAD(&hdev->monitored_devices);
2592 INIT_LIST_HEAD(&hdev->local_codecs);
2593 INIT_WORK(&hdev->rx_work, hci_rx_work);
2594 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2595 INIT_WORK(&hdev->tx_work, hci_tx_work);
2596 INIT_WORK(&hdev->power_on, hci_power_on);
2597 INIT_WORK(&hdev->error_reset, hci_error_reset);
2599 hci_cmd_sync_init(hdev);
2601 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2603 skb_queue_head_init(&hdev->rx_q);
2604 skb_queue_head_init(&hdev->cmd_q);
2605 skb_queue_head_init(&hdev->raw_q);
2607 init_waitqueue_head(&hdev->req_wait_q);
2609 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2610 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2612 hci_devcd_setup(hdev);
2613 hci_request_setup(hdev);
2615 hci_init_sysfs(hdev);
2616 discovery_init(hdev);
2620 EXPORT_SYMBOL(hci_alloc_dev_priv);
2622 /* Free HCI device */
2623 void hci_free_dev(struct hci_dev *hdev)
2625 /* will free via device release */
2626 put_device(&hdev->dev);
2628 EXPORT_SYMBOL(hci_free_dev);
2630 /* Register HCI device */
2631 int hci_register_dev(struct hci_dev *hdev)
2635 if (!hdev->open || !hdev->close || !hdev->send)
2638 /* Do not allow HCI_AMP devices to register at index 0,
2639 * so the index can be used as the AMP controller ID.
2641 switch (hdev->dev_type) {
2643 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2646 id = ida_alloc_range(&hci_index_ida, 1, HCI_MAX_ID - 1,
2656 error = dev_set_name(&hdev->dev, "hci%u", id);
2660 hdev->name = dev_name(&hdev->dev);
2663 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2665 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2666 if (!hdev->workqueue) {
2671 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2673 if (!hdev->req_workqueue) {
2674 destroy_workqueue(hdev->workqueue);
2679 if (!IS_ERR_OR_NULL(bt_debugfs))
2680 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2682 error = device_add(&hdev->dev);
2683 if (error < 0)
2684 goto err_wqueue;
2686 hci_leds_init(hdev);
2688 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2689 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2690 hdev);
2691 if (hdev->rfkill) {
2692 if (rfkill_register(hdev->rfkill) < 0) {
2693 rfkill_destroy(hdev->rfkill);
2694 hdev->rfkill = NULL;
2695 }
2696 }
2698 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2699 hci_dev_set_flag(hdev, HCI_RFKILLED);
2701 hci_dev_set_flag(hdev, HCI_SETUP);
2702 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2704 if (hdev->dev_type == HCI_PRIMARY) {
2705 /* Assume BR/EDR support until proven otherwise (such as
2706 * through reading supported features during init).
2707 */
2708 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2709 }
2711 write_lock(&hci_dev_list_lock);
2712 list_add(&hdev->list, &hci_dev_list);
2713 write_unlock(&hci_dev_list_lock);
2715 /* Devices that are marked for raw-only usage are unconfigured
2716 * and should not be included in normal operation.
2717 */
2718 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2719 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2721 /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2722 * callback.
2723 */
2724 if (hdev->wakeup)
2725 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2727 hci_sock_dev_event(hdev, HCI_DEV_REG);
2728 hci_dev_hold(hdev);
2730 error = hci_register_suspend_notifier(hdev);
2731 if (error)
2732 BT_WARN("register suspend notifier failed error:%d\n", error);
2734 queue_work(hdev->req_workqueue, &hdev->power_on);
2736 idr_init(&hdev->adv_monitors_idr);
2737 msft_register(hdev);
2739 return id;
2741 err_wqueue:
2742 debugfs_remove_recursive(hdev->debugfs);
2743 destroy_workqueue(hdev->workqueue);
2744 destroy_workqueue(hdev->req_workqueue);
2745 err:
2746 ida_free(&hci_index_ida, hdev->id);
2748 return error;
2749 }
2750 EXPORT_SYMBOL(hci_register_dev);
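/*
 * Illustrative sketch (not in the original source): the minimal driver
 * side of registration. hci_register_dev() rejects a device without
 * open/close/send, so a transport driver wires those up before calling
 * it; the foo_* callbacks below are hypothetical.
 *
 *	struct hci_dev *hdev;
 *	int err;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = foo_open;
 *	hdev->close = foo_close;
 *	hdev->send  = foo_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 */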
2752 /* Unregister HCI device */
2753 void hci_unregister_dev(struct hci_dev *hdev)
2754 {
2755 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2757 mutex_lock(&hdev->unregister_lock);
2758 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2759 mutex_unlock(&hdev->unregister_lock);
2761 write_lock(&hci_dev_list_lock);
2762 list_del(&hdev->list);
2763 write_unlock(&hci_dev_list_lock);
2765 cancel_work_sync(&hdev->power_on);
2767 hci_cmd_sync_clear(hdev);
2769 hci_unregister_suspend_notifier(hdev);
2771 msft_unregister(hdev);
2773 hci_dev_do_close(hdev);
2775 if (!test_bit(HCI_INIT, &hdev->flags) &&
2776 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2777 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2778 hci_dev_lock(hdev);
2779 mgmt_index_removed(hdev);
2780 hci_dev_unlock(hdev);
2781 }
2783 /* mgmt_index_removed should take care of emptying the
2784 * pending list */
2785 BUG_ON(!list_empty(&hdev->mgmt_pending));
2787 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2789 if (hdev->rfkill) {
2790 rfkill_unregister(hdev->rfkill);
2791 rfkill_destroy(hdev->rfkill);
2792 }
2794 device_del(&hdev->dev);
2795 /* Actual cleanup is deferred until hci_release_dev(). */
2796 hci_dev_put(hdev);
2797 }
2798 EXPORT_SYMBOL(hci_unregister_dev);
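/*
 * Illustrative sketch (not in the original source): the matching driver
 * remove path. Unregistering only detaches the device; the memory goes
 * away with the final reference, via hci_free_dev() and the device
 * release callback (hci_release_dev).
 *
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */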
2800 /* Release HCI device */
2801 void hci_release_dev(struct hci_dev *hdev)
2802 {
2803 debugfs_remove_recursive(hdev->debugfs);
2804 kfree_const(hdev->hw_info);
2805 kfree_const(hdev->fw_info);
2807 destroy_workqueue(hdev->workqueue);
2808 destroy_workqueue(hdev->req_workqueue);
2810 hci_dev_lock(hdev);
2811 hci_bdaddr_list_clear(&hdev->reject_list);
2812 hci_bdaddr_list_clear(&hdev->accept_list);
2813 hci_uuids_clear(hdev);
2814 hci_link_keys_clear(hdev);
2815 hci_smp_ltks_clear(hdev);
2816 hci_smp_irks_clear(hdev);
2817 hci_remote_oob_data_clear(hdev);
2818 hci_adv_instances_clear(hdev);
2819 hci_adv_monitors_clear(hdev);
2820 hci_bdaddr_list_clear(&hdev->le_accept_list);
2821 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2822 hci_conn_params_clear_all(hdev);
2823 hci_discovery_filter_clear(hdev);
2824 hci_blocked_keys_clear(hdev);
2825 hci_codec_list_clear(&hdev->local_codecs);
2826 hci_dev_unlock(hdev);
2828 ida_destroy(&hdev->unset_handle_ida);
2829 ida_free(&hci_index_ida, hdev->id);
2830 kfree_skb(hdev->sent_cmd);
2831 kfree_skb(hdev->req_skb);
2832 kfree_skb(hdev->recv_event);
2833 kfree(hdev);
2834 }
2835 EXPORT_SYMBOL(hci_release_dev);
2837 int hci_register_suspend_notifier(struct hci_dev *hdev)
2838 {
2839 int ret = 0;
2841 if (!hdev->suspend_notifier.notifier_call &&
2842 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2843 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2844 ret = register_pm_notifier(&hdev->suspend_notifier);
2845 }
2847 return ret;
2848 }
2850 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2851 {
2852 int ret = 0;
2854 if (hdev->suspend_notifier.notifier_call) {
2855 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2856 if (!ret)
2857 hdev->suspend_notifier.notifier_call = NULL;
2858 }
2860 return ret;
2861 }
2863 /* Cancel ongoing command synchronously:
2865 * - Cancel command timer
2866 * - Reset command counter
2867 * - Cancel command request
2868 */
2869 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2870 {
2871 bt_dev_dbg(hdev, "err 0x%2.2x", err);
2873 cancel_delayed_work_sync(&hdev->cmd_timer);
2874 cancel_delayed_work_sync(&hdev->ncmd_timer);
2875 atomic_set(&hdev->cmd_cnt, 1);
2877 hci_cmd_sync_cancel_sync(hdev, err);
2878 }
2880 /* Suspend HCI device */
2881 int hci_suspend_dev(struct hci_dev *hdev)
2882 {
2883 int ret;
2885 bt_dev_dbg(hdev, "");
2887 /* Suspend should only act when powered. */
2888 if (!hdev_is_powered(hdev) ||
2889 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2890 return 0;
2892 /* If powering down, don't attempt to suspend */
2893 if (mgmt_powering_down(hdev))
2894 return 0;
2896 /* Cancel potentially blocking sync operation before suspend */
2897 hci_cancel_cmd_sync(hdev, -EHOSTDOWN);
2899 hci_req_sync_lock(hdev);
2900 ret = hci_suspend_sync(hdev);
2901 hci_req_sync_unlock(hdev);
2903 hci_clear_wake_reason(hdev);
2904 mgmt_suspending(hdev, hdev->suspend_state);
2906 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2907 return ret;
2908 }
2909 EXPORT_SYMBOL(hci_suspend_dev);
2911 /* Resume HCI device */
2912 int hci_resume_dev(struct hci_dev *hdev)
2913 {
2914 int ret;
2916 bt_dev_dbg(hdev, "");
2918 /* Resume should only act when powered. */
2919 if (!hdev_is_powered(hdev) ||
2920 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2921 return 0;
2923 /* If powering down, don't attempt to resume */
2924 if (mgmt_powering_down(hdev))
2925 return 0;
2927 hci_req_sync_lock(hdev);
2928 ret = hci_resume_sync(hdev);
2929 hci_req_sync_unlock(hdev);
2931 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2932 hdev->wake_addr_type);
2934 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2935 return ret;
2936 }
2937 EXPORT_SYMBOL(hci_resume_dev);
2939 /* Reset HCI device */
2940 int hci_reset_dev(struct hci_dev *hdev)
2941 {
2942 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2943 struct sk_buff *skb;
2945 skb = bt_skb_alloc(3, GFP_ATOMIC);
2946 if (!skb)
2947 return -ENOMEM;
2949 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2950 skb_put_data(skb, hw_err, 3);
2952 bt_dev_err(hdev, "Injecting HCI hardware error event");
2954 /* Send Hardware Error to upper stack */
2955 return hci_recv_frame(hdev, skb);
2956 }
2957 EXPORT_SYMBOL(hci_reset_dev);
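/*
 * Illustrative note (not in the original source): the injected buffer is
 * a complete HCI event packet. With HCI_EV_HARDWARE_ERROR being 0x10,
 * the three bytes decode as:
 *
 *	0x10	event code (Hardware Error)
 *	0x01	parameter total length
 *	0x00	hardware code
 *
 * The event handler reacts to a Hardware Error event by scheduling the
 * error_reset work, so feeding the fake event through hci_recv_frame()
 * resets the controller via the normal event path.
 */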
2959 /* Receive frame from HCI drivers */
2960 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2961 {
2962 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2963 && !test_bit(HCI_INIT, &hdev->flags))) {
2964 kfree_skb(skb);
2965 return -ENXIO;
2966 }
2968 switch (hci_skb_pkt_type(skb)) {
2969 case HCI_EVENT_PKT:
2970 break;
2971 case HCI_ACLDATA_PKT:
2972 /* Detect if ISO packet has been sent as ACL */
2973 if (hci_conn_num(hdev, ISO_LINK)) {
2974 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2975 __u8 type;
2977 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2978 if (type == ISO_LINK)
2979 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2980 }
2981 break;
2982 case HCI_SCODATA_PKT:
2983 break;
2984 case HCI_ISODATA_PKT:
2985 break;
2986 default:
2987 kfree_skb(skb);
2988 return -EINVAL;
2989 }
2992 bt_cb(skb)->incoming = 1;
2995 __net_timestamp(skb);
2997 skb_queue_tail(&hdev->rx_q, skb);
2998 queue_work(hdev->workqueue, &hdev->rx_work);
3000 return 0;
3001 }
3002 EXPORT_SYMBOL(hci_recv_frame);
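/*
 * Illustrative sketch (not in the original source): how a transport
 * driver typically hands a received packet to the core. The driver
 * strips its transport framing, sets the HCI packet type and calls
 * hci_recv_frame(); the foo_* name is hypothetical.
 *
 *	static int foo_recv_pkt(struct hci_dev *hdev, const u8 *data,
 *				size_t len, u8 pkt_type)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = bt_skb_alloc(len, GFP_ATOMIC);
 *		if (!skb)
 *			return -ENOMEM;
 *
 *		hci_skb_pkt_type(skb) = pkt_type;
 *		skb_put_data(skb, data, len);
 *
 *		return hci_recv_frame(hdev, skb);
 *	}
 */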
3004 /* Receive diagnostic message from HCI drivers */
3005 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3006 {
3007 /* Mark as diagnostic packet */
3008 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3011 __net_timestamp(skb);
3013 skb_queue_tail(&hdev->rx_q, skb);
3014 queue_work(hdev->workqueue, &hdev->rx_work);
3016 return 0;
3017 }
3018 EXPORT_SYMBOL(hci_recv_diag);
3020 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3021 {
3022 va_list vargs;
3024 va_start(vargs, fmt);
3025 kfree_const(hdev->hw_info);
3026 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3027 va_end(vargs);
3028 }
3029 EXPORT_SYMBOL(hci_set_hw_info);
3031 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3032 {
3033 va_list vargs;
3035 va_start(vargs, fmt);
3036 kfree_const(hdev->fw_info);
3037 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3038 va_end(vargs);
3039 }
3040 EXPORT_SYMBOL(hci_set_fw_info);
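/*
 * Illustrative usage (not in the original source): drivers usually
 * record hardware and firmware revision strings once they have been
 * read from the controller; the values below are made up.
 *
 *	hci_set_hw_info(hdev, "rev %u", hw_rev);
 *	hci_set_fw_info(hdev, "build %u", fw_build);
 *
 * The resulting strings are exposed read-only through the device's
 * debugfs entries by hci_debugfs.
 */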
3042 /* ---- Interface to upper protocols ---- */
3044 int hci_register_cb(struct hci_cb *cb)
3045 {
3046 BT_DBG("%p name %s", cb, cb->name);
3048 mutex_lock(&hci_cb_list_lock);
3049 list_add_tail(&cb->list, &hci_cb_list);
3050 mutex_unlock(&hci_cb_list_lock);
3052 return 0;
3053 }
3054 EXPORT_SYMBOL(hci_register_cb);
3056 int hci_unregister_cb(struct hci_cb *cb)
3057 {
3058 BT_DBG("%p name %s", cb, cb->name);
3060 mutex_lock(&hci_cb_list_lock);
3061 list_del(&cb->list);
3062 mutex_unlock(&hci_cb_list_lock);
3064 return 0;
3065 }
3066 EXPORT_SYMBOL(hci_unregister_cb);
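/*
 * Illustrative sketch (not in the original source): an upper protocol
 * hooks into connection events by registering a struct hci_cb. Only the
 * callbacks a protocol cares about need to be set; the sample_* names
 * are hypothetical.
 *
 *	static void sample_connect_cfm(struct hci_conn *conn, __u8 status)
 *	{
 *		BT_DBG("conn %p status 0x%2.2x", conn, status);
 *	}
 *
 *	static struct hci_cb sample_cb = {
 *		.name		= "sample",
 *		.connect_cfm	= sample_connect_cfm,
 *	};
 *
 * hci_register_cb(&sample_cb) on module init and
 * hci_unregister_cb(&sample_cb) on module exit complete the pairing.
 */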
3068 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3069 {
3070 int err;
3072 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3073 skb->len);
3075 /* Time stamp */
3076 __net_timestamp(skb);
3078 /* Send copy to monitor */
3079 hci_send_to_monitor(hdev, skb);
3081 if (atomic_read(&hdev->promisc)) {
3082 /* Send copy to the sockets */
3083 hci_send_to_sock(hdev, skb);
3084 }
3086 /* Get rid of skb owner, prior to sending to the driver. */
3087 skb_orphan(skb);
3089 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3090 kfree_skb(skb);
3091 return -EINVAL;
3092 }
3094 err = hdev->send(hdev, skb);
3095 if (err < 0) {
3096 bt_dev_err(hdev, "sending frame failed (%d)", err);
3097 kfree_skb(skb);
3098 return err;
3099 }
3101 return 0;
3102 }
3104 /* Send HCI command */
3105 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3106 const void *param)
3107 {
3108 struct sk_buff *skb;
3110 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3112 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3113 if (!skb) {
3114 bt_dev_err(hdev, "no memory for command");
3115 return -ENOMEM;
3116 }
3118 /* Stand-alone HCI commands must be flagged as
3119 * single-command requests.
3120 */
3121 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3123 skb_queue_tail(&hdev->cmd_q, skb);
3124 queue_work(hdev->workqueue, &hdev->cmd_work);
3126 return 0;
3127 }
3129 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3130 const void *param)
3131 {
3132 struct sk_buff *skb;
3134 if (hci_opcode_ogf(opcode) != 0x3f) {
3135 /* A controller receiving a command shall respond with either
3136 * a Command Status Event or a Command Complete Event.
3137 * Therefore, all standard HCI commands must be sent via the
3138 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3139 * Some vendors do not comply with this rule for vendor-specific
3140 * commands and do not return any event. We want to support
3141 * unresponded commands for such cases only.
3142 */
3143 bt_dev_err(hdev, "unresponded command not supported");
3144 return -EINVAL;
3145 }
3147 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3148 if (!skb) {
3149 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3150 opcode);
3151 return -ENOMEM;
3152 }
3154 hci_send_frame(hdev, skb);
3156 return 0;
3157 }
3158 EXPORT_SYMBOL(__hci_cmd_send);
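/*
 * Illustrative usage (not in the original source): firing a
 * vendor-specific command that the controller is known not to answer.
 * The OCF 0x0001 below is made up; only OGF 0x3f opcodes are accepted
 * by __hci_cmd_send().
 *
 *	static const u8 param[] = { 0x01 };
 *
 *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			     sizeof(param), param);
 */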
3160 /* Get data from the previously sent command */
3161 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3162 {
3163 struct hci_command_hdr *hdr;
3165 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3166 return NULL;
3168 hdr = (void *)skb->data;
3170 if (hdr->opcode != cpu_to_le16(opcode))
3171 return NULL;
3173 return skb->data + HCI_COMMAND_HDR_SIZE;
3174 }
3176 /* Get data from the previously sent command */
3177 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3178 {
3179 void *data;
3181 /* Check if opcode matches last sent command */
3182 data = hci_cmd_data(hdev->sent_cmd, opcode);
3183 if (!data)
3184 /* Check if opcode matches last request */
3185 data = hci_cmd_data(hdev->req_skb, opcode);
3187 return data;
3188 }
3190 /* Get data from last received event */
3191 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3192 {
3193 struct hci_event_hdr *hdr;
3194 int offset;
3196 if (!hdev->recv_event)
3197 return NULL;
3199 hdr = (void *)hdev->recv_event->data;
3200 offset = sizeof(*hdr);
3202 if (hdr->evt != event) {
3203 /* In case of LE metaevent check the subevent match */
3204 if (hdr->evt == HCI_EV_LE_META) {
3205 struct hci_ev_le_meta *ev;
3207 ev = (void *)hdev->recv_event->data + offset;
3208 offset += sizeof(*ev);
3209 if (ev->subevent == event)
3210 goto found;
3211 }
3212 return NULL;
3213 }
3215 found:
3216 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3218 return hdev->recv_event->data + offset;
3219 }
3222 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3223 {
3224 struct hci_acl_hdr *hdr;
3225 int len = skb->len;
3227 skb_push(skb, HCI_ACL_HDR_SIZE);
3228 skb_reset_transport_header(skb);
3229 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3230 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3231 hdr->dlen = cpu_to_le16(len);
3232 }
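/*
 * Illustrative note (not in the original source): hci_handle_pack()
 * folds the 12-bit connection handle and the 4 flag bits (packet
 * boundary and broadcast) into one 16-bit field:
 *
 *	packed = (handle & 0x0fff) | (flags << 12);
 *
 * e.g. handle 0x002a with ACL_START (0x02) becomes 0x202a, sent little
 * endian as 2a 20. The receive side splits it again with hci_handle()
 * and hci_flags().
 */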
3234 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3235 struct sk_buff *skb, __u16 flags)
3236 {
3237 struct hci_conn *conn = chan->conn;
3238 struct hci_dev *hdev = conn->hdev;
3239 struct sk_buff *list;
3241 skb->len = skb_headlen(skb);
3242 skb->data_len = 0;
3244 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3246 switch (hdev->dev_type) {
3247 case HCI_PRIMARY:
3248 hci_add_acl_hdr(skb, conn->handle, flags);
3249 break;
3250 case HCI_AMP:
3251 hci_add_acl_hdr(skb, chan->handle, flags);
3252 break;
3253 default:
3254 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3255 return;
3256 }
3258 list = skb_shinfo(skb)->frag_list;
3259 if (!list) {
3260 /* Non fragmented */
3261 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3263 skb_queue_tail(queue, skb);
3264 } else {
3265 /* Fragmented */
3266 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3268 skb_shinfo(skb)->frag_list = NULL;
3270 /* Queue all fragments atomically. We need to use spin_lock_bh
3271 * here because of 6LoWPAN links, as there this function is
3272 * called from softirq and using normal spin lock could cause
3273 * deadlocks.
3274 */
3275 spin_lock_bh(&queue->lock);
3277 __skb_queue_tail(queue, skb);
3279 flags &= ~ACL_START;
3280 flags |= ACL_CONT;
3281 do {
3282 skb = list; list = list->next;
3284 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3285 hci_add_acl_hdr(skb, conn->handle, flags);
3287 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3289 __skb_queue_tail(queue, skb);
3290 } while (list);
3292 spin_unlock_bh(&queue->lock);
3293 }
3294 }
3296 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3297 {
3298 struct hci_dev *hdev = chan->conn->hdev;
3300 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3302 hci_queue_acl(chan, &chan->data_q, skb, flags);
3304 queue_work(hdev->workqueue, &hdev->tx_work);
3305 }
3308 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3309 {
3310 struct hci_dev *hdev = conn->hdev;
3311 struct hci_sco_hdr hdr;
3313 BT_DBG("%s len %d", hdev->name, skb->len);
3315 hdr.handle = cpu_to_le16(conn->handle);
3316 hdr.dlen = skb->len;
3318 skb_push(skb, HCI_SCO_HDR_SIZE);
3319 skb_reset_transport_header(skb);
3320 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3322 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3324 skb_queue_tail(&conn->data_q, skb);
3325 queue_work(hdev->workqueue, &hdev->tx_work);
3326 }
3329 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3330 {
3331 struct hci_iso_hdr *hdr;
3332 int len = skb->len;
3334 skb_push(skb, HCI_ISO_HDR_SIZE);
3335 skb_reset_transport_header(skb);
3336 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3337 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3338 hdr->dlen = cpu_to_le16(len);
3339 }
3341 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3342 struct sk_buff *skb)
3343 {
3344 struct hci_dev *hdev = conn->hdev;
3345 struct sk_buff *list;
3346 __u16 flags;
3348 skb->len = skb_headlen(skb);
3349 skb->data_len = 0;
3351 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3353 list = skb_shinfo(skb)->frag_list;
3355 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3356 hci_add_iso_hdr(skb, conn->handle, flags);
3358 if (!list) {
3359 /* Non fragmented */
3360 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3362 skb_queue_tail(queue, skb);
3363 } else {
3364 /* Fragmented */
3365 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3367 skb_shinfo(skb)->frag_list = NULL;
3369 __skb_queue_tail(queue, skb);
3371 do {
3372 skb = list; list = list->next;
3374 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3375 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3376 0x00);
3377 hci_add_iso_hdr(skb, conn->handle, flags);
3379 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3381 __skb_queue_tail(queue, skb);
3382 } while (list);
3383 }
3384 }
3386 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3387 {
3388 struct hci_dev *hdev = conn->hdev;
3390 BT_DBG("%s len %d", hdev->name, skb->len);
3392 hci_queue_iso(conn, &conn->data_q, skb);
3394 queue_work(hdev->workqueue, &hdev->tx_work);
3395 }
3397 /* ---- HCI TX task (outgoing data) ---- */
3399 /* HCI Connection scheduler */
3400 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3401 {
3402 struct hci_dev *hdev;
3403 int cnt, q;
3405 if (!conn) {
3406 *quote = 0;
3407 return;
3408 }
3410 hdev = conn->hdev;
3412 switch (conn->type) {
3413 case ACL_LINK:
3414 cnt = hdev->acl_cnt;
3415 break;
3416 case AMP_LINK:
3417 cnt = hdev->block_cnt;
3418 break;
3419 case SCO_LINK:
3420 case ESCO_LINK:
3421 cnt = hdev->sco_cnt;
3422 break;
3423 case LE_LINK:
3424 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3425 break;
3426 case ISO_LINK:
3427 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3428 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3429 break;
3430 default:
3431 cnt = 0;
3432 bt_dev_err(hdev, "unknown link type %d", conn->type);
3433 }
3435 q = cnt / num;
3436 *quote = q ? q : 1;
3437 }
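/*
 * Illustrative note (not in the original source): the resulting quota is
 * a fair share of the free controller buffers among the busy connections
 * of one link type, with a floor of one packet. E.g. cnt = 8 free ACL
 * slots split across num = 3 connections gives each a quote of
 * 8 / 3 = 2 per scheduling round; with cnt = 1 the floor still grants
 * one packet, so a connection is never starved outright.
 */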
3439 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3440 int *quote)
3441 {
3442 struct hci_conn_hash *h = &hdev->conn_hash;
3443 struct hci_conn *conn = NULL, *c;
3444 unsigned int num = 0, min = ~0;
3446 /* We don't have to lock device here. Connections are always
3447 * added and removed with TX task disabled. */
3449 rcu_read_lock();
3451 list_for_each_entry_rcu(c, &h->list, list) {
3452 if (c->type != type || skb_queue_empty(&c->data_q))
3453 continue;
3455 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3456 continue;
3458 num++;
3460 if (c->sent < min) {
3461 min = c->sent;
3462 conn = c;
3463 }
3465 if (hci_conn_num(hdev, type) == num)
3466 break;
3467 }
3469 rcu_read_unlock();
3471 hci_quote_sent(conn, num, quote);
3473 BT_DBG("conn %p quote %d", conn, *quote);
3475 return conn;
3476 }
3477 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3478 {
3479 struct hci_conn_hash *h = &hdev->conn_hash;
3480 struct hci_conn *c;
3482 bt_dev_err(hdev, "link tx timeout");
3484 rcu_read_lock();
3486 /* Kill stalled connections */
3487 list_for_each_entry_rcu(c, &h->list, list) {
3488 if (c->type == type && c->sent) {
3489 bt_dev_err(hdev, "killing stalled connection %pMR",
3490 &c->dst);
3491 /* hci_disconnect might sleep, so, we have to release
3492 * the RCU read lock before calling it.
3493 */
3494 rcu_read_unlock();
3495 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3496 rcu_read_lock();
3497 }
3498 }
3500 rcu_read_unlock();
3501 }
3503 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3504 int *quote)
3505 {
3506 struct hci_conn_hash *h = &hdev->conn_hash;
3507 struct hci_chan *chan = NULL;
3508 unsigned int num = 0, min = ~0, cur_prio = 0;
3509 struct hci_conn *conn;
3510 int conn_num = 0;
3512 BT_DBG("%s", hdev->name);
3514 rcu_read_lock();
3516 list_for_each_entry_rcu(conn, &h->list, list) {
3517 struct hci_chan *tmp;
3519 if (conn->type != type)
3520 continue;
3522 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3523 continue;
3525 conn_num++;
3527 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3528 struct sk_buff *skb;
3530 if (skb_queue_empty(&tmp->data_q))
3531 continue;
3533 skb = skb_peek(&tmp->data_q);
3534 if (skb->priority < cur_prio)
3535 continue;
3537 if (skb->priority > cur_prio) {
3538 num = 0;
3539 min = ~0;
3540 cur_prio = skb->priority;
3541 }
3543 num++;
3545 if (conn->sent < min) {
3546 min = conn->sent;
3547 chan = tmp;
3548 }
3549 }
3551 if (hci_conn_num(hdev, type) == conn_num)
3552 break;
3553 }
3555 rcu_read_unlock();
3557 if (!chan)
3558 return NULL;
3560 hci_quote_sent(chan->conn, num, quote);
3562 BT_DBG("chan %p quote %d", chan, *quote);
3564 return chan;
3565 }
3566 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3567 {
3568 struct hci_conn_hash *h = &hdev->conn_hash;
3569 struct hci_conn *conn;
3570 int num = 0;
3572 BT_DBG("%s", hdev->name);
3574 rcu_read_lock();
3576 list_for_each_entry_rcu(conn, &h->list, list) {
3577 struct hci_chan *chan;
3579 if (conn->type != type)
3580 continue;
3582 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3583 continue;
3585 num++;
3587 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3588 struct sk_buff *skb;
3590 if (chan->sent) {
3591 chan->sent = 0;
3592 continue;
3593 }
3595 if (skb_queue_empty(&chan->data_q))
3596 continue;
3598 skb = skb_peek(&chan->data_q);
3599 if (skb->priority >= HCI_PRIO_MAX - 1)
3600 continue;
3602 skb->priority = HCI_PRIO_MAX - 1;
3604 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3605 skb->priority);
3606 }
3608 if (hci_conn_num(hdev, type) == num)
3609 break;
3610 }
3612 rcu_read_unlock();
3613 }
3616 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3617 {
3618 /* Calculate count of blocks used by this packet */
3619 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3620 }
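/*
 * Illustrative note (not in the original source): the payload length
 * (skb->len minus the 4-byte ACL header) is divided into controller
 * data blocks, rounding up. E.g. a 1024-byte skb with block_len = 339
 * needs DIV_ROUND_UP(1020, 339) = 4 blocks.
 */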
3622 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3623 {
3624 unsigned long last_tx;
3626 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3627 return;
3629 switch (type) {
3630 case LE_LINK:
3631 last_tx = hdev->le_last_tx;
3632 break;
3633 default:
3634 last_tx = hdev->acl_last_tx;
3635 break;
3636 }
3638 /* tx timeout must be longer than maximum link supervision timeout
3639 * (40.9 seconds)
3640 */
3641 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3642 hci_link_tx_to(hdev, type);
3643 }
3646 static void hci_sched_sco(struct hci_dev *hdev)
3647 {
3648 struct hci_conn *conn;
3649 struct sk_buff *skb;
3650 int quote;
3652 BT_DBG("%s", hdev->name);
3654 if (!hci_conn_num(hdev, SCO_LINK))
3655 return;
3657 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3658 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3659 BT_DBG("skb %p len %d", skb, skb->len);
3660 hci_send_frame(hdev, skb);
3662 conn->sent++;
3663 if (conn->sent == ~0)
3664 conn->sent = 0;
3665 }
3666 }
3667 }
3669 static void hci_sched_esco(struct hci_dev *hdev)
3670 {
3671 struct hci_conn *conn;
3672 struct sk_buff *skb;
3673 int quote;
3675 BT_DBG("%s", hdev->name);
3677 if (!hci_conn_num(hdev, ESCO_LINK))
3678 return;
3680 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3681 &quote))) {
3682 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3683 BT_DBG("skb %p len %d", skb, skb->len);
3684 hci_send_frame(hdev, skb);
3686 conn->sent++;
3687 if (conn->sent == ~0)
3688 conn->sent = 0;
3689 }
3690 }
3691 }
3693 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3694 {
3695 unsigned int cnt = hdev->acl_cnt;
3696 struct hci_chan *chan;
3697 struct sk_buff *skb;
3698 int quote;
3700 __check_timeout(hdev, cnt, ACL_LINK);
3702 while (hdev->acl_cnt &&
3703 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
3704 u32 priority = (skb_peek(&chan->data_q))->priority;
3705 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3706 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3707 skb->len, skb->priority);
3709 /* Stop if priority has changed */
3710 if (skb->priority < priority)
3711 break;
3713 skb = skb_dequeue(&chan->data_q);
3715 hci_conn_enter_active_mode(chan->conn,
3716 bt_cb(skb)->force_active);
3718 hci_send_frame(hdev, skb);
3719 hdev->acl_last_tx = jiffies;
3721 hdev->acl_cnt--;
3722 chan->sent++;
3723 chan->conn->sent++;
3725 /* Send pending SCO packets right away */
3726 hci_sched_sco(hdev);
3727 hci_sched_esco(hdev);
3728 }
3729 }
3731 if (cnt != hdev->acl_cnt)
3732 hci_prio_recalculate(hdev, ACL_LINK);
3733 }
3735 static void hci_sched_acl_blk(struct hci_dev *hdev)
3736 {
3737 unsigned int cnt = hdev->block_cnt;
3738 struct hci_chan *chan;
3739 struct sk_buff *skb;
3740 int quote;
3741 u8 type;
3743 BT_DBG("%s", hdev->name);
3745 if (hdev->dev_type == HCI_AMP)
3746 type = AMP_LINK;
3747 else
3748 type = ACL_LINK;
3750 __check_timeout(hdev, cnt, type);
3752 while (hdev->block_cnt > 0 &&
3753 (chan = hci_chan_sent(hdev, type, "e))) {
3754 u32 priority = (skb_peek(&chan->data_q))->priority;
3755 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3756 int blocks;
3758 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3759 skb->len, skb->priority);
3761 /* Stop if priority has changed */
3762 if (skb->priority < priority)
3763 break;
3765 skb = skb_dequeue(&chan->data_q);
3767 blocks = __get_blocks(hdev, skb);
3768 if (blocks > hdev->block_cnt)
3769 return;
3771 hci_conn_enter_active_mode(chan->conn,
3772 bt_cb(skb)->force_active);
3774 hci_send_frame(hdev, skb);
3775 hdev->acl_last_tx = jiffies;
3777 hdev->block_cnt -= blocks;
3778 quote -= blocks;
3780 chan->sent += blocks;
3781 chan->conn->sent += blocks;
3782 }
3783 }
3785 if (cnt != hdev->block_cnt)
3786 hci_prio_recalculate(hdev, type);
3787 }
3789 static void hci_sched_acl(struct hci_dev *hdev)
3790 {
3791 BT_DBG("%s", hdev->name);
3793 /* No ACL link over BR/EDR controller */
3794 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3795 return;
3797 /* No AMP link over AMP controller */
3798 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3799 return;
3801 switch (hdev->flow_ctl_mode) {
3802 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3803 hci_sched_acl_pkt(hdev);
3804 break;
3806 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3807 hci_sched_acl_blk(hdev);
3808 break;
3809 }
3810 }
3812 static void hci_sched_le(struct hci_dev *hdev)
3813 {
3814 struct hci_chan *chan;
3815 struct sk_buff *skb;
3816 int quote, cnt, tmp;
3818 BT_DBG("%s", hdev->name);
3820 if (!hci_conn_num(hdev, LE_LINK))
3821 return;
3823 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3825 __check_timeout(hdev, cnt, LE_LINK);
3827 tmp = cnt;
3828 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
3829 u32 priority = (skb_peek(&chan->data_q))->priority;
3830 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3831 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3832 skb->len, skb->priority);
3834 /* Stop if priority has changed */
3835 if (skb->priority < priority)
3836 break;
3838 skb = skb_dequeue(&chan->data_q);
3840 hci_send_frame(hdev, skb);
3841 hdev->le_last_tx = jiffies;
3843 cnt--;
3844 chan->sent++;
3845 chan->conn->sent++;
3847 /* Send pending SCO packets right away */
3848 hci_sched_sco(hdev);
3849 hci_sched_esco(hdev);
3850 }
3851 }
3853 if (hdev->le_pkts)
3854 hdev->le_cnt = cnt;
3855 else
3856 hdev->acl_cnt = cnt;
3858 if (cnt != tmp)
3859 hci_prio_recalculate(hdev, LE_LINK);
3860 }
3863 static void hci_sched_iso(struct hci_dev *hdev)
3864 {
3865 struct hci_conn *conn;
3866 struct sk_buff *skb;
3867 int quote, *cnt;
3869 BT_DBG("%s", hdev->name);
3871 if (!hci_conn_num(hdev, ISO_LINK))
3872 return;
3874 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3875 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3876 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, "e))) {
3877 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3878 BT_DBG("skb %p len %d", skb, skb->len);
3879 hci_send_frame(hdev, skb);
3881 conn->sent++;
3882 if (conn->sent == ~0)
3883 conn->sent = 0;
3884 }
3885 }
3886 }
3889 static void hci_tx_work(struct work_struct *work)
3890 {
3891 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3892 struct sk_buff *skb;
3894 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3895 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3897 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3898 /* Schedule queues and send stuff to HCI driver */
3899 hci_sched_sco(hdev);
3900 hci_sched_esco(hdev);
3901 hci_sched_iso(hdev);
3902 hci_sched_acl(hdev);
3903 hci_sched_le(hdev);
3904 }
3906 /* Send next queued raw (unknown type) packet */
3907 while ((skb = skb_dequeue(&hdev->raw_q)))
3908 hci_send_frame(hdev, skb);
3909 }
3911 /* ----- HCI RX task (incoming data processing) ----- */
3913 /* ACL data packet */
3914 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3915 {
3916 struct hci_acl_hdr *hdr = (void *) skb->data;
3917 struct hci_conn *conn;
3918 __u16 handle, flags;
3920 skb_pull(skb, HCI_ACL_HDR_SIZE);
3922 handle = __le16_to_cpu(hdr->handle);
3923 flags = hci_flags(handle);
3924 handle = hci_handle(handle);
3926 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3927 handle, flags);
3929 hdev->stat.acl_rx++;
3931 hci_dev_lock(hdev);
3932 conn = hci_conn_hash_lookup_handle(hdev, handle);
3933 hci_dev_unlock(hdev);
3935 if (conn) {
3936 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3938 /* Send to upper protocol */
3939 l2cap_recv_acldata(conn, skb, flags);
3940 return;
3941 } else {
3942 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3943 handle);
3944 }
3946 kfree_skb(skb);
3947 }
3949 /* SCO data packet */
3950 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3951 {
3952 struct hci_sco_hdr *hdr = (void *) skb->data;
3953 struct hci_conn *conn;
3954 __u16 handle, flags;
3956 skb_pull(skb, HCI_SCO_HDR_SIZE);
3958 handle = __le16_to_cpu(hdr->handle);
3959 flags = hci_flags(handle);
3960 handle = hci_handle(handle);
3962 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3963 handle, flags);
3965 hdev->stat.sco_rx++;
3967 hci_dev_lock(hdev);
3968 conn = hci_conn_hash_lookup_handle(hdev, handle);
3969 hci_dev_unlock(hdev);
3971 if (conn) {
3972 /* Send to upper protocol */
3973 hci_skb_pkt_status(skb) = flags & 0x03;
3974 sco_recv_scodata(conn, skb);
3975 return;
3976 } else {
3977 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3978 handle);
3979 }
3981 kfree_skb(skb);
3982 }
3984 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3985 {
3986 struct hci_iso_hdr *hdr;
3987 struct hci_conn *conn;
3988 __u16 handle, flags;
3990 hdr = skb_pull_data(skb, sizeof(*hdr));
3991 if (!hdr) {
3992 bt_dev_err(hdev, "ISO packet too small");
3993 goto drop;
3994 }
3996 handle = __le16_to_cpu(hdr->handle);
3997 flags = hci_flags(handle);
3998 handle = hci_handle(handle);
4000 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
4001 handle, flags);
4003 hci_dev_lock(hdev);
4004 conn = hci_conn_hash_lookup_handle(hdev, handle);
4005 hci_dev_unlock(hdev);
4007 if (!conn) {
4008 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
4009 handle);
4010 goto drop;
4011 }
4013 /* Send to upper protocol */
4014 iso_recv(conn, skb, flags);
4015 return;
4017 drop:
4018 kfree_skb(skb);
4019 }
4021 static bool hci_req_is_complete(struct hci_dev *hdev)
4022 {
4023 struct sk_buff *skb;
4025 skb = skb_peek(&hdev->cmd_q);
4026 if (!skb)
4027 return true;
4029 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4030 }
4032 static void hci_resend_last(struct hci_dev *hdev)
4033 {
4034 struct hci_command_hdr *sent;
4035 struct sk_buff *skb;
4036 u16 opcode;
4038 if (!hdev->sent_cmd)
4039 return;
4041 sent = (void *) hdev->sent_cmd->data;
4042 opcode = __le16_to_cpu(sent->opcode);
4043 if (opcode == HCI_OP_RESET)
4044 return;
4046 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4047 if (!skb)
4048 return;
4050 skb_queue_head(&hdev->cmd_q, skb);
4051 queue_work(hdev->workqueue, &hdev->cmd_work);
4052 }
4054 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4055 hci_req_complete_t *req_complete,
4056 hci_req_complete_skb_t *req_complete_skb)
4057 {
4058 struct sk_buff *skb;
4059 unsigned long flags;
4061 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4063 /* If the completed command doesn't match the last one that was
4064 * sent we need to do special handling of it.
4065 */
4066 if (!hci_sent_cmd_data(hdev, opcode)) {
4067 /* Some CSR based controllers generate a spontaneous
4068 * reset complete event during init and any pending
4069 * command will never be completed. In such a case we
4070 * need to resend whatever was the last sent
4071 * command.
4072 */
4073 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4074 hci_resend_last(hdev);
4076 return;
4077 }
4079 /* If we reach this point this event matches the last command sent */
4080 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4082 /* If the command succeeded and there's still more commands in
4083 * this request the request is not yet complete.
4084 */
4085 if (!status && !hci_req_is_complete(hdev))
4086 return;
4088 skb = hdev->req_skb;
4090 /* If this was the last command in a request the complete
4091 * callback would be found in hdev->req_skb instead of the
4092 * command queue (hdev->cmd_q).
4093 */
4094 if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
4095 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4096 return;
4097 }
4099 if (skb && bt_cb(skb)->hci.req_complete) {
4100 *req_complete = bt_cb(skb)->hci.req_complete;
4101 return;
4102 }
4104 /* Remove all pending commands belonging to this request */
4105 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4106 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4107 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4108 __skb_queue_head(&hdev->cmd_q, skb);
4109 break;
4110 }
4112 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4113 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4114 else
4115 *req_complete = bt_cb(skb)->hci.req_complete;
4116 dev_kfree_skb_irq(skb);
4117 }
4118 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4119 }
4121 static void hci_rx_work(struct work_struct *work)
4122 {
4123 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4124 struct sk_buff *skb;
4126 BT_DBG("%s", hdev->name);
4128 /* The kcov_remote functions are used to collect packet parsing
4129 * coverage information from this background thread and to associate
4130 * the coverage with the syscall's thread which originally injected
4131 * the packet. This helps with fuzzing the kernel.
4132 */
4133 for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
4134 kcov_remote_start_common(skb_get_kcov_handle(skb));
4136 /* Send copy to monitor */
4137 hci_send_to_monitor(hdev, skb);
4139 if (atomic_read(&hdev->promisc)) {
4140 /* Send copy to the sockets */
4141 hci_send_to_sock(hdev, skb);
4142 }
4144 /* If the device has been opened in HCI_USER_CHANNEL,
4145 * the userspace has exclusive access to device.
4146 * When device is HCI_INIT, we still need to process
4147 * the data packets to the driver in order
4148 * to complete its setup().
4149 */
4150 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4151 !test_bit(HCI_INIT, &hdev->flags)) {
4152 kfree_skb(skb);
4153 continue;
4154 }
4156 if (test_bit(HCI_INIT, &hdev->flags)) {
4157 /* Don't process data packets in this state. */
4158 switch (hci_skb_pkt_type(skb)) {
4159 case HCI_ACLDATA_PKT:
4160 case HCI_SCODATA_PKT:
4161 case HCI_ISODATA_PKT:
4162 kfree_skb(skb);
4163 continue;
4164 }
4165 }
4167 /* Process frame */
4168 switch (hci_skb_pkt_type(skb)) {
4169 case HCI_EVENT_PKT:
4170 BT_DBG("%s Event packet", hdev->name);
4171 hci_event_packet(hdev, skb);
4172 break;
4174 case HCI_ACLDATA_PKT:
4175 BT_DBG("%s ACL data packet", hdev->name);
4176 hci_acldata_packet(hdev, skb);
4177 break;
4179 case HCI_SCODATA_PKT:
4180 BT_DBG("%s SCO data packet", hdev->name);
4181 hci_scodata_packet(hdev, skb);
4182 break;
4184 case HCI_ISODATA_PKT:
4185 BT_DBG("%s ISO data packet", hdev->name);
4186 hci_isodata_packet(hdev, skb);
4187 break;
4189 default:
4190 kfree_skb(skb);
4191 break;
4192 }
4193 }
4194 }
4196 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4197 {
4198 int err;
4200 bt_dev_dbg(hdev, "skb %p", skb);
4202 kfree_skb(hdev->sent_cmd);
4204 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4205 if (!hdev->sent_cmd) {
4206 skb_queue_head(&hdev->cmd_q, skb);
4207 queue_work(hdev->workqueue, &hdev->cmd_work);
4208 return;
4209 }
4211 err = hci_send_frame(hdev, skb);
4212 if (err < 0) {
4213 hci_cmd_sync_cancel_sync(hdev, -err);
4214 return;
4215 }
4217 if (hci_req_status_pend(hdev) &&
4218 !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4219 kfree_skb(hdev->req_skb);
4220 hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4221 }
4223 atomic_dec(&hdev->cmd_cnt);
4224 }
4226 static void hci_cmd_work(struct work_struct *work)
4227 {
4228 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4229 struct sk_buff *skb;
4231 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4232 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4234 /* Send queued commands */
4235 if (atomic_read(&hdev->cmd_cnt)) {
4236 skb = skb_dequeue(&hdev->cmd_q);
4237 if (!skb)
4238 return;
4240 hci_send_cmd_sync(hdev, skb);
4242 rcu_read_lock();
4243 if (test_bit(HCI_RESET, &hdev->flags) ||
4244 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4245 cancel_delayed_work(&hdev->cmd_timer);
4246 else
4247 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4248 HCI_CMD_TIMEOUT);
4249 rcu_read_unlock();
4250 }
4251 }