2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
49 #include "hci_codec.h"
51 static void hci_rx_work(struct work_struct *work);
52 static void hci_cmd_work(struct work_struct *work);
53 static void hci_tx_work(struct work_struct *work);
56 LIST_HEAD(hci_dev_list);
57 DEFINE_RWLOCK(hci_dev_list_lock);
59 /* HCI callback list */
60 LIST_HEAD(hci_cb_list);
61 DEFINE_MUTEX(hci_cb_list_lock);
63 /* HCI ID Numbering */
64 static DEFINE_IDA(hci_index_ida);
66 static int hci_scan_req(struct hci_request *req, unsigned long opt)
70 BT_DBG("%s %x", req->hdev->name, scan);
72 /* Inquiry and Page scans */
73 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
77 static int hci_auth_req(struct hci_request *req, unsigned long opt)
81 BT_DBG("%s %x", req->hdev->name, auth);
84 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
88 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
92 BT_DBG("%s %x", req->hdev->name, encrypt);
95 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
99 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
101 __le16 policy = cpu_to_le16(opt);
103 BT_DBG("%s %x", req->hdev->name, policy);
105 /* Default link policy */
106 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
110 /* Get HCI device by index.
111 * Device is held on return. */
112 struct hci_dev *hci_dev_get(int index)
114 struct hci_dev *hdev = NULL, *d;
121 read_lock(&hci_dev_list_lock);
122 list_for_each_entry(d, &hci_dev_list, list) {
123 if (d->id == index) {
124 hdev = hci_dev_hold(d);
128 read_unlock(&hci_dev_list_lock);
132 /* ---- Inquiry support ---- */
134 bool hci_discovery_active(struct hci_dev *hdev)
136 struct discovery_state *discov = &hdev->discovery;
138 switch (discov->state) {
139 case DISCOVERY_FINDING:
140 case DISCOVERY_RESOLVING:
148 void hci_discovery_set_state(struct hci_dev *hdev, int state)
150 int old_state = hdev->discovery.state;
152 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
154 if (old_state == state)
157 hdev->discovery.state = state;
160 case DISCOVERY_STOPPED:
161 hci_update_passive_scan(hdev);
163 if (old_state != DISCOVERY_STARTING)
164 mgmt_discovering(hdev, 0);
166 case DISCOVERY_STARTING:
168 case DISCOVERY_FINDING:
169 mgmt_discovering(hdev, 1);
171 case DISCOVERY_RESOLVING:
173 case DISCOVERY_STOPPING:
178 void hci_inquiry_cache_flush(struct hci_dev *hdev)
180 struct discovery_state *cache = &hdev->discovery;
181 struct inquiry_entry *p, *n;
183 list_for_each_entry_safe(p, n, &cache->all, all) {
188 INIT_LIST_HEAD(&cache->unknown);
189 INIT_LIST_HEAD(&cache->resolve);
192 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
195 struct discovery_state *cache = &hdev->discovery;
196 struct inquiry_entry *e;
198 BT_DBG("cache %p, %pMR", cache, bdaddr);
200 list_for_each_entry(e, &cache->all, all) {
201 if (!bacmp(&e->data.bdaddr, bdaddr))
208 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
211 struct discovery_state *cache = &hdev->discovery;
212 struct inquiry_entry *e;
214 BT_DBG("cache %p, %pMR", cache, bdaddr);
216 list_for_each_entry(e, &cache->unknown, list) {
217 if (!bacmp(&e->data.bdaddr, bdaddr))
224 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
228 struct discovery_state *cache = &hdev->discovery;
229 struct inquiry_entry *e;
231 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
233 list_for_each_entry(e, &cache->resolve, list) {
234 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
236 if (!bacmp(&e->data.bdaddr, bdaddr))
243 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
244 struct inquiry_entry *ie)
246 struct discovery_state *cache = &hdev->discovery;
247 struct list_head *pos = &cache->resolve;
248 struct inquiry_entry *p;
252 list_for_each_entry(p, &cache->resolve, list) {
253 if (p->name_state != NAME_PENDING &&
254 abs(p->data.rssi) >= abs(ie->data.rssi))
259 list_add(&ie->list, pos);
262 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
265 struct discovery_state *cache = &hdev->discovery;
266 struct inquiry_entry *ie;
269 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
271 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
274 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
276 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
278 if (!ie->data.ssp_mode)
279 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
281 if (ie->name_state == NAME_NEEDED &&
282 data->rssi != ie->data.rssi) {
283 ie->data.rssi = data->rssi;
284 hci_inquiry_cache_update_resolve(hdev, ie);
290 /* Entry not in the cache. Add new one. */
291 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
293 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
297 list_add(&ie->all, &cache->all);
300 ie->name_state = NAME_KNOWN;
302 ie->name_state = NAME_NOT_KNOWN;
303 list_add(&ie->list, &cache->unknown);
307 if (name_known && ie->name_state != NAME_KNOWN &&
308 ie->name_state != NAME_PENDING) {
309 ie->name_state = NAME_KNOWN;
313 memcpy(&ie->data, data, sizeof(*data));
314 ie->timestamp = jiffies;
315 cache->timestamp = jiffies;
317 if (ie->name_state == NAME_NOT_KNOWN)
318 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
324 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
326 struct discovery_state *cache = &hdev->discovery;
327 struct inquiry_info *info = (struct inquiry_info *) buf;
328 struct inquiry_entry *e;
331 list_for_each_entry(e, &cache->all, all) {
332 struct inquiry_data *data = &e->data;
337 bacpy(&info->bdaddr, &data->bdaddr);
338 info->pscan_rep_mode = data->pscan_rep_mode;
339 info->pscan_period_mode = data->pscan_period_mode;
340 info->pscan_mode = data->pscan_mode;
341 memcpy(info->dev_class, data->dev_class, 3);
342 info->clock_offset = data->clock_offset;
348 BT_DBG("cache %p, copied %d", cache, copied);
352 static int hci_inq_req(struct hci_request *req, unsigned long opt)
354 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
355 struct hci_dev *hdev = req->hdev;
356 struct hci_cp_inquiry cp;
358 BT_DBG("%s", hdev->name);
360 if (test_bit(HCI_INQUIRY, &hdev->flags))
364 memcpy(&cp.lap, &ir->lap, 3);
365 cp.length = ir->length;
366 cp.num_rsp = ir->num_rsp;
367 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
372 int hci_inquiry(void __user *arg)
374 __u8 __user *ptr = arg;
375 struct hci_inquiry_req ir;
376 struct hci_dev *hdev;
377 int err = 0, do_inquiry = 0, max_rsp;
381 if (copy_from_user(&ir, ptr, sizeof(ir)))
384 hdev = hci_dev_get(ir.dev_id);
388 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
393 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
398 if (hdev->dev_type != HCI_PRIMARY) {
403 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
408 /* Restrict maximum inquiry length to 60 seconds */
409 if (ir.length > 60) {
415 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
416 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
417 hci_inquiry_cache_flush(hdev);
420 hci_dev_unlock(hdev);
422 timeo = ir.length * msecs_to_jiffies(2000);
425 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
430 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
431 * cleared). If it is interrupted by a signal, return -EINTR.
433 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
434 TASK_INTERRUPTIBLE)) {
440 /* for unlimited number of responses we will use buffer with
443 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
445 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
446 * copy it to the user space.
448 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
455 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
456 hci_dev_unlock(hdev);
458 BT_DBG("num_rsp %d", ir.num_rsp);
460 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
462 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
475 static int hci_dev_do_open(struct hci_dev *hdev)
479 BT_DBG("%s %p", hdev->name, hdev);
481 hci_req_sync_lock(hdev);
483 ret = hci_dev_open_sync(hdev);
485 hci_req_sync_unlock(hdev);
489 /* ---- HCI ioctl helpers ---- */
491 int hci_dev_open(__u16 dev)
493 struct hci_dev *hdev;
496 hdev = hci_dev_get(dev);
500 /* Devices that are marked as unconfigured can only be powered
501 * up as user channel. Trying to bring them up as normal devices
502 * will result into a failure. Only user channel operation is
505 * When this function is called for a user channel, the flag
506 * HCI_USER_CHANNEL will be set first before attempting to
509 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
510 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
515 /* We need to ensure that no other power on/off work is pending
516 * before proceeding to call hci_dev_do_open. This is
517 * particularly important if the setup procedure has not yet
520 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
521 cancel_delayed_work(&hdev->power_off);
523 /* After this call it is guaranteed that the setup procedure
524 * has finished. This means that error conditions like RFKILL
525 * or no valid public or static random address apply.
527 flush_workqueue(hdev->req_workqueue);
529 /* For controllers not using the management interface and that
530 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
531 * so that pairing works for them. Once the management interface
532 * is in use this bit will be cleared again and userspace has
533 * to explicitly enable it.
535 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
536 !hci_dev_test_flag(hdev, HCI_MGMT))
537 hci_dev_set_flag(hdev, HCI_BONDABLE);
539 err = hci_dev_do_open(hdev);
546 int hci_dev_do_close(struct hci_dev *hdev)
550 BT_DBG("%s %p", hdev->name, hdev);
552 hci_req_sync_lock(hdev);
554 err = hci_dev_close_sync(hdev);
556 hci_req_sync_unlock(hdev);
561 int hci_dev_close(__u16 dev)
563 struct hci_dev *hdev;
566 hdev = hci_dev_get(dev);
570 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
575 cancel_work_sync(&hdev->power_on);
576 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
577 cancel_delayed_work(&hdev->power_off);
579 err = hci_dev_do_close(hdev);
586 static int hci_dev_do_reset(struct hci_dev *hdev)
590 BT_DBG("%s %p", hdev->name, hdev);
592 hci_req_sync_lock(hdev);
595 skb_queue_purge(&hdev->rx_q);
596 skb_queue_purge(&hdev->cmd_q);
598 /* Cancel these to avoid queueing non-chained pending work */
599 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
600 cancel_delayed_work(&hdev->cmd_timer);
601 cancel_delayed_work(&hdev->ncmd_timer);
603 /* Avoid potential lockdep warnings from the *_flush() calls by
604 * ensuring the workqueue is empty up front.
606 drain_workqueue(hdev->workqueue);
609 hci_inquiry_cache_flush(hdev);
610 hci_conn_hash_flush(hdev);
611 hci_dev_unlock(hdev);
616 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
618 atomic_set(&hdev->cmd_cnt, 1);
619 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
621 ret = hci_reset_sync(hdev);
623 hci_req_sync_unlock(hdev);
627 int hci_dev_reset(__u16 dev)
629 struct hci_dev *hdev;
632 hdev = hci_dev_get(dev);
636 if (!test_bit(HCI_UP, &hdev->flags)) {
641 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
646 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
651 err = hci_dev_do_reset(hdev);
658 int hci_dev_reset_stat(__u16 dev)
660 struct hci_dev *hdev;
663 hdev = hci_dev_get(dev);
667 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
672 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
677 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
684 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
686 bool conn_changed, discov_changed;
688 BT_DBG("%s scan 0x%02x", hdev->name, scan);
690 if ((scan & SCAN_PAGE))
691 conn_changed = !hci_dev_test_and_set_flag(hdev,
694 conn_changed = hci_dev_test_and_clear_flag(hdev,
697 if ((scan & SCAN_INQUIRY)) {
698 discov_changed = !hci_dev_test_and_set_flag(hdev,
701 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
702 discov_changed = hci_dev_test_and_clear_flag(hdev,
706 if (!hci_dev_test_flag(hdev, HCI_MGMT))
709 if (conn_changed || discov_changed) {
710 /* In case this was disabled through mgmt */
711 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
713 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
714 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
716 mgmt_new_settings(hdev);
720 int hci_dev_cmd(unsigned int cmd, void __user *arg)
722 struct hci_dev *hdev;
723 struct hci_dev_req dr;
726 if (copy_from_user(&dr, arg, sizeof(dr)))
729 hdev = hci_dev_get(dr.dev_id);
733 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
738 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
743 if (hdev->dev_type != HCI_PRIMARY) {
748 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
755 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
756 HCI_INIT_TIMEOUT, NULL);
760 if (!lmp_encrypt_capable(hdev)) {
765 if (!test_bit(HCI_AUTH, &hdev->flags)) {
766 /* Auth must be enabled first */
767 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
768 HCI_INIT_TIMEOUT, NULL);
773 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
774 HCI_INIT_TIMEOUT, NULL);
778 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
779 HCI_INIT_TIMEOUT, NULL);
781 /* Ensure that the connectable and discoverable states
782 * get correctly modified as this was a non-mgmt change.
785 hci_update_passive_scan_state(hdev, dr.dev_opt);
789 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
790 HCI_INIT_TIMEOUT, NULL);
794 hdev->link_mode = ((__u16) dr.dev_opt) &
795 (HCI_LM_MASTER | HCI_LM_ACCEPT);
799 if (hdev->pkt_type == (__u16) dr.dev_opt)
802 hdev->pkt_type = (__u16) dr.dev_opt;
803 mgmt_phy_configuration_changed(hdev, NULL);
807 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
808 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
812 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
813 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
826 int hci_get_dev_list(void __user *arg)
828 struct hci_dev *hdev;
829 struct hci_dev_list_req *dl;
830 struct hci_dev_req *dr;
831 int n = 0, size, err;
834 if (get_user(dev_num, (__u16 __user *) arg))
837 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
840 size = sizeof(*dl) + dev_num * sizeof(*dr);
842 dl = kzalloc(size, GFP_KERNEL);
848 read_lock(&hci_dev_list_lock);
849 list_for_each_entry(hdev, &hci_dev_list, list) {
850 unsigned long flags = hdev->flags;
852 /* When the auto-off is configured it means the transport
853 * is running, but in that case still indicate that the
854 * device is actually down.
856 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
857 flags &= ~BIT(HCI_UP);
859 (dr + n)->dev_id = hdev->id;
860 (dr + n)->dev_opt = flags;
865 read_unlock(&hci_dev_list_lock);
868 size = sizeof(*dl) + n * sizeof(*dr);
870 err = copy_to_user(arg, dl, size);
873 return err ? -EFAULT : 0;
876 int hci_get_dev_info(void __user *arg)
878 struct hci_dev *hdev;
879 struct hci_dev_info di;
883 if (copy_from_user(&di, arg, sizeof(di)))
886 hdev = hci_dev_get(di.dev_id);
890 /* When the auto-off is configured it means the transport
891 * is running, but in that case still indicate that the
892 * device is actually down.
894 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
895 flags = hdev->flags & ~BIT(HCI_UP);
899 strcpy(di.name, hdev->name);
900 di.bdaddr = hdev->bdaddr;
901 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
903 di.pkt_type = hdev->pkt_type;
904 if (lmp_bredr_capable(hdev)) {
905 di.acl_mtu = hdev->acl_mtu;
906 di.acl_pkts = hdev->acl_pkts;
907 di.sco_mtu = hdev->sco_mtu;
908 di.sco_pkts = hdev->sco_pkts;
910 di.acl_mtu = hdev->le_mtu;
911 di.acl_pkts = hdev->le_pkts;
915 di.link_policy = hdev->link_policy;
916 di.link_mode = hdev->link_mode;
918 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
919 memcpy(&di.features, &hdev->features, sizeof(di.features));
921 if (copy_to_user(arg, &di, sizeof(di)))
929 /* ---- Interface to HCI drivers ---- */
931 static int hci_rfkill_set_block(void *data, bool blocked)
933 struct hci_dev *hdev = data;
935 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
937 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
941 hci_dev_set_flag(hdev, HCI_RFKILLED);
942 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
943 !hci_dev_test_flag(hdev, HCI_CONFIG))
944 hci_dev_do_close(hdev);
946 hci_dev_clear_flag(hdev, HCI_RFKILLED);
952 static const struct rfkill_ops hci_rfkill_ops = {
953 .set_block = hci_rfkill_set_block,
956 static void hci_power_on(struct work_struct *work)
958 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
961 BT_DBG("%s", hdev->name);
963 if (test_bit(HCI_UP, &hdev->flags) &&
964 hci_dev_test_flag(hdev, HCI_MGMT) &&
965 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
966 cancel_delayed_work(&hdev->power_off);
967 err = hci_powered_update_sync(hdev);
968 mgmt_power_on(hdev, err);
972 err = hci_dev_do_open(hdev);
975 mgmt_set_powered_failed(hdev, err);
976 hci_dev_unlock(hdev);
980 /* During the HCI setup phase, a few error conditions are
981 * ignored and they need to be checked now. If they are still
982 * valid, it is important to turn the device back off.
984 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
985 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
986 (hdev->dev_type == HCI_PRIMARY &&
987 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
988 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
989 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
990 hci_dev_do_close(hdev);
991 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
992 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
993 HCI_AUTO_OFF_TIMEOUT);
996 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
997 /* For unconfigured devices, set the HCI_RAW flag
998 * so that userspace can easily identify them.
1000 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1001 set_bit(HCI_RAW, &hdev->flags);
1003 /* For fully configured devices, this will send
1004 * the Index Added event. For unconfigured devices,
1005 * it will send Unconfigued Index Added event.
1007 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
1008 * and no event will be send.
1010 mgmt_index_added(hdev);
1011 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
1012 /* When the controller is now configured, then it
1013 * is important to clear the HCI_RAW flag.
1015 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1016 clear_bit(HCI_RAW, &hdev->flags);
1018 /* Powering on the controller with HCI_CONFIG set only
1019 * happens with the transition from unconfigured to
1020 * configured. This will send the Index Added event.
1022 mgmt_index_added(hdev);
1026 static void hci_power_off(struct work_struct *work)
1028 struct hci_dev *hdev = container_of(work, struct hci_dev,
1031 BT_DBG("%s", hdev->name);
1033 hci_dev_do_close(hdev);
1036 static void hci_error_reset(struct work_struct *work)
1038 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1040 BT_DBG("%s", hdev->name);
1043 hdev->hw_error(hdev, hdev->hw_error_code);
1045 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1047 if (hci_dev_do_close(hdev))
1050 hci_dev_do_open(hdev);
1053 void hci_uuids_clear(struct hci_dev *hdev)
1055 struct bt_uuid *uuid, *tmp;
1057 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1058 list_del(&uuid->list);
1063 void hci_link_keys_clear(struct hci_dev *hdev)
1065 struct link_key *key;
1067 list_for_each_entry(key, &hdev->link_keys, list) {
1068 list_del_rcu(&key->list);
1069 kfree_rcu(key, rcu);
1073 void hci_smp_ltks_clear(struct hci_dev *hdev)
1077 list_for_each_entry(k, &hdev->long_term_keys, list) {
1078 list_del_rcu(&k->list);
1083 void hci_smp_irks_clear(struct hci_dev *hdev)
1087 list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
1088 list_del_rcu(&k->list);
1093 void hci_blocked_keys_clear(struct hci_dev *hdev)
1095 struct blocked_key *b;
1097 list_for_each_entry(b, &hdev->blocked_keys, list) {
1098 list_del_rcu(&b->list);
1103 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1105 bool blocked = false;
1106 struct blocked_key *b;
1109 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1110 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1120 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1125 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1126 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1129 if (hci_is_blocked_key(hdev,
1130 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1132 bt_dev_warn_ratelimited(hdev,
1133 "Link key blocked for %pMR",
1146 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1147 u8 key_type, u8 old_key_type)
1150 if (key_type < 0x03)
1153 /* Debug keys are insecure so don't store them persistently */
1154 if (key_type == HCI_LK_DEBUG_COMBINATION)
1157 /* Changed combination key and there's no previous one */
1158 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1161 /* Security mode 3 case */
1165 /* BR/EDR key derived using SC from an LE link */
1166 if (conn->type == LE_LINK)
1169 /* Neither local nor remote side had no-bonding as requirement */
1170 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1173 /* Local side had dedicated bonding as requirement */
1174 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1177 /* Remote side had dedicated bonding as requirement */
1178 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1181 /* If none of the above criteria match, then don't store the key
1186 static u8 ltk_role(u8 type)
1188 if (type == SMP_LTK)
1189 return HCI_ROLE_MASTER;
1191 return HCI_ROLE_SLAVE;
1194 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1195 u8 addr_type, u8 role)
1200 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1201 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1204 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1207 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1209 bt_dev_warn_ratelimited(hdev,
1210 "LTK blocked for %pMR",
1223 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1225 struct smp_irk *irk_to_return = NULL;
1226 struct smp_irk *irk;
1229 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1230 if (!bacmp(&irk->rpa, rpa)) {
1231 irk_to_return = irk;
1236 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1237 if (smp_irk_matches(hdev, irk->val, rpa)) {
1238 bacpy(&irk->rpa, rpa);
1239 irk_to_return = irk;
1245 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1246 irk_to_return->val)) {
1247 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1248 &irk_to_return->bdaddr);
1249 irk_to_return = NULL;
1254 return irk_to_return;
1257 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1260 struct smp_irk *irk_to_return = NULL;
1261 struct smp_irk *irk;
1263 /* Identity Address must be public or static random */
1264 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1268 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1269 if (addr_type == irk->addr_type &&
1270 bacmp(bdaddr, &irk->bdaddr) == 0) {
1271 irk_to_return = irk;
1278 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1279 irk_to_return->val)) {
1280 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1281 &irk_to_return->bdaddr);
1282 irk_to_return = NULL;
1287 return irk_to_return;
1290 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1291 bdaddr_t *bdaddr, u8 *val, u8 type,
1292 u8 pin_len, bool *persistent)
1294 struct link_key *key, *old_key;
1297 old_key = hci_find_link_key(hdev, bdaddr);
1299 old_key_type = old_key->type;
1302 old_key_type = conn ? conn->key_type : 0xff;
1303 key = kzalloc(sizeof(*key), GFP_KERNEL);
1306 list_add_rcu(&key->list, &hdev->link_keys);
1309 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1311 /* Some buggy controller combinations generate a changed
1312 * combination key for legacy pairing even when there's no
1314 if (type == HCI_LK_CHANGED_COMBINATION &&
1315 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1316 type = HCI_LK_COMBINATION;
1318 conn->key_type = type;
1321 bacpy(&key->bdaddr, bdaddr);
1322 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1323 key->pin_len = pin_len;
1325 if (type == HCI_LK_CHANGED_COMBINATION)
1326 key->type = old_key_type;
1331 *persistent = hci_persistent_key(hdev, conn, type,
1337 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1338 u8 addr_type, u8 type, u8 authenticated,
1339 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1341 struct smp_ltk *key, *old_key;
1342 u8 role = ltk_role(type);
1344 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1348 key = kzalloc(sizeof(*key), GFP_KERNEL);
1351 list_add_rcu(&key->list, &hdev->long_term_keys);
1354 bacpy(&key->bdaddr, bdaddr);
1355 key->bdaddr_type = addr_type;
1356 memcpy(key->val, tk, sizeof(key->val));
1357 key->authenticated = authenticated;
1360 key->enc_size = enc_size;
1366 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1367 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1369 struct smp_irk *irk;
1371 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1373 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1377 bacpy(&irk->bdaddr, bdaddr);
1378 irk->addr_type = addr_type;
1380 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1383 memcpy(irk->val, val, 16);
1384 bacpy(&irk->rpa, rpa);
1389 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1391 struct link_key *key;
1393 key = hci_find_link_key(hdev, bdaddr);
1397 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1399 list_del_rcu(&key->list);
1400 kfree_rcu(key, rcu);
1405 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1410 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1411 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1414 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1416 list_del_rcu(&k->list);
1421 return removed ? 0 : -ENOENT;
1424 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1428 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
1429 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1432 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1434 list_del_rcu(&k->list);
1439 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1442 struct smp_irk *irk;
1445 if (type == BDADDR_BREDR) {
1446 if (hci_find_link_key(hdev, bdaddr))
1451 /* Convert to HCI addr type which struct smp_ltk uses */
1452 if (type == BDADDR_LE_PUBLIC)
1453 addr_type = ADDR_LE_DEV_PUBLIC;
1455 addr_type = ADDR_LE_DEV_RANDOM;
1457 irk = hci_get_irk(hdev, bdaddr, addr_type);
1459 bdaddr = &irk->bdaddr;
1460 addr_type = irk->addr_type;
1464 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1465 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1475 /* HCI command timer function */
1476 static void hci_cmd_timeout(struct work_struct *work)
1478 struct hci_dev *hdev = container_of(work, struct hci_dev,
1481 if (hdev->sent_cmd) {
1482 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1483 u16 opcode = __le16_to_cpu(sent->opcode);
1485 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1487 bt_dev_err(hdev, "command tx timeout");
1490 if (hdev->cmd_timeout)
1491 hdev->cmd_timeout(hdev);
1493 atomic_set(&hdev->cmd_cnt, 1);
1494 queue_work(hdev->workqueue, &hdev->cmd_work);
1497 /* HCI ncmd timer function */
1498 static void hci_ncmd_timeout(struct work_struct *work)
1500 struct hci_dev *hdev = container_of(work, struct hci_dev,
1503 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1505 /* During HCI_INIT phase no events can be injected if the ncmd timer
1506 * triggers since the procedure has its own timeout handling.
1508 if (test_bit(HCI_INIT, &hdev->flags))
1511 /* This is an irrecoverable state, inject hardware error event */
1512 hci_reset_dev(hdev);
1515 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1516 bdaddr_t *bdaddr, u8 bdaddr_type)
1518 struct oob_data *data;
1520 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1521 if (bacmp(bdaddr, &data->bdaddr) != 0)
1523 if (data->bdaddr_type != bdaddr_type)
1531 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1534 struct oob_data *data;
1536 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1540 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1542 list_del(&data->list);
1548 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1550 struct oob_data *data, *n;
1552 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1553 list_del(&data->list);
1558 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1559 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1560 u8 *hash256, u8 *rand256)
1562 struct oob_data *data;
1564 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1566 data = kmalloc(sizeof(*data), GFP_KERNEL);
1570 bacpy(&data->bdaddr, bdaddr);
1571 data->bdaddr_type = bdaddr_type;
1572 list_add(&data->list, &hdev->remote_oob_data);
1575 if (hash192 && rand192) {
1576 memcpy(data->hash192, hash192, sizeof(data->hash192));
1577 memcpy(data->rand192, rand192, sizeof(data->rand192));
1578 if (hash256 && rand256)
1579 data->present = 0x03;
1581 memset(data->hash192, 0, sizeof(data->hash192));
1582 memset(data->rand192, 0, sizeof(data->rand192));
1583 if (hash256 && rand256)
1584 data->present = 0x02;
1586 data->present = 0x00;
1589 if (hash256 && rand256) {
1590 memcpy(data->hash256, hash256, sizeof(data->hash256));
1591 memcpy(data->rand256, rand256, sizeof(data->rand256));
1593 memset(data->hash256, 0, sizeof(data->hash256));
1594 memset(data->rand256, 0, sizeof(data->rand256));
1595 if (hash192 && rand192)
1596 data->present = 0x01;
1599 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1604 /* This function requires the caller holds hdev->lock */
1605 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1607 struct adv_info *adv_instance;
1609 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1610 if (adv_instance->instance == instance)
1611 return adv_instance;
/* This function requires the caller holds hdev->lock */
/* Return the advertising instance that follows @instance in
 * hdev->adv_instances, wrapping from the last entry back to the first
 * (round-robin rotation of advertising instances).
 * NOTE(review): the NULL check after the lookup is not visible in this
 * excerpt -- confirm the not-found case is handled.
 */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
        struct adv_info *cur_instance;

        cur_instance = hci_find_adv_instance(hdev, instance);

        /* Wrap around: the successor of the last instance is the first. */
        if (cur_instance == list_last_entry(&hdev->adv_instances,
                                            struct adv_info, list))
                return list_first_entry(&hdev->adv_instances,
                                        struct adv_info, list);
        return list_next_entry(cur_instance, list);
1634 /* This function requires the caller holds hdev->lock */
1635 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1637 struct adv_info *adv_instance;
1639 adv_instance = hci_find_adv_instance(hdev, instance);
1643 BT_DBG("%s removing %dMR", hdev->name, instance);
1645 if (hdev->cur_adv_instance == instance) {
1646 if (hdev->adv_instance_timeout) {
1647 cancel_delayed_work(&hdev->adv_instance_expire);
1648 hdev->adv_instance_timeout = 0;
1650 hdev->cur_adv_instance = 0x00;
1653 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1655 list_del(&adv_instance->list);
1656 kfree(adv_instance);
1658 hdev->adv_instance_cnt--;
1663 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1665 struct adv_info *adv_instance, *n;
1667 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1668 adv_instance->rpa_expired = rpa_expired;
1671 /* This function requires the caller holds hdev->lock */
1672 void hci_adv_instances_clear(struct hci_dev *hdev)
1674 struct adv_info *adv_instance, *n;
1676 if (hdev->adv_instance_timeout) {
1677 cancel_delayed_work(&hdev->adv_instance_expire);
1678 hdev->adv_instance_timeout = 0;
1681 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1682 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1683 list_del(&adv_instance->list);
1684 kfree(adv_instance);
1687 hdev->adv_instance_cnt = 0;
1688 hdev->cur_adv_instance = 0x00;
/* Delayed-work callback: mark an advertising instance's resolvable
 * private address as expired so a fresh RPA is generated next time the
 * instance is programmed.
 */
static void adv_instance_rpa_expired(struct work_struct *work)
        struct adv_info *adv_instance = container_of(work, struct adv_info,
                                                     rpa_expired_cb.work);

        adv_instance->rpa_expired = true;
1701 /* This function requires the caller holds hdev->lock */
1702 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
1703 u16 adv_data_len, u8 *adv_data,
1704 u16 scan_rsp_len, u8 *scan_rsp_data,
1705 u16 timeout, u16 duration, s8 tx_power,
1706 u32 min_interval, u32 max_interval)
1708 struct adv_info *adv_instance;
1710 adv_instance = hci_find_adv_instance(hdev, instance);
1712 memset(adv_instance->adv_data, 0,
1713 sizeof(adv_instance->adv_data));
1714 memset(adv_instance->scan_rsp_data, 0,
1715 sizeof(adv_instance->scan_rsp_data));
1717 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1718 instance < 1 || instance > hdev->le_num_of_adv_sets)
1721 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
1725 adv_instance->pending = true;
1726 adv_instance->instance = instance;
1727 list_add(&adv_instance->list, &hdev->adv_instances);
1728 hdev->adv_instance_cnt++;
1731 adv_instance->flags = flags;
1732 adv_instance->min_interval = min_interval;
1733 adv_instance->max_interval = max_interval;
1734 adv_instance->tx_power = tx_power;
1736 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1737 scan_rsp_len, scan_rsp_data);
1739 adv_instance->timeout = timeout;
1740 adv_instance->remaining_time = timeout;
1743 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
1745 adv_instance->duration = duration;
1747 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
1748 adv_instance_rpa_expired);
1750 BT_DBG("%s for %dMR", hdev->name, instance);
/* This function requires the caller holds hdev->lock */
/* Replace the advertising / scan-response payloads of an existing
 * instance, setting the *_changed flags only when the new data actually
 * differs (ADV_DATA_CMP / SCAN_RSP_CMP).
 * NOTE(review): the -ENOENT return for a missing instance and the final
 * return are not visible in this excerpt.
 */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
                              u16 adv_data_len, u8 *adv_data,
                              u16 scan_rsp_len, u8 *scan_rsp_data)
        struct adv_info *adv;

        adv = hci_find_adv_instance(hdev, instance);
        /* If advertisement doesn't exist, we can't modify its data */

        /* Copy new advertising data only when it differs from the old. */
        if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
                memset(adv->adv_data, 0, sizeof(adv->adv_data));
                memcpy(adv->adv_data, adv_data, adv_data_len);
                adv->adv_data_len = adv_data_len;
                adv->adv_data_changed = true;

        /* Same for the scan-response payload. */
        if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
                memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
                memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
                adv->scan_rsp_len = scan_rsp_len;
                adv->scan_rsp_changed = true;

        /* Mark as changed if there are flags which would affect it */
        if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
            adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
                adv->scan_rsp_changed = true;
/* This function requires the caller holds hdev->lock */
/* Return the MGMT advertising flags in effect for @instance.  Instance
 * 0x00 is special: its flags are derived from the controller-wide
 * HCI_ADVERTISING_CONNECTABLE / discoverable settings.
 * NOTE(review): the declaration of the local 'flags' accumulator and
 * both return statements are not visible in this excerpt.
 */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
        struct adv_info *adv;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

        adv = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
/* Report whether @instance carries scan-response data (and thus must be
 * advertised as scannable).  Instance 0x00 always includes the local
 * name; instances with appearance/local-name flags are scannable too.
 * NOTE(review): the early returns after the instance-0 check and the
 * NULL-lookup check are not visible in this excerpt.
 */
bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
        struct adv_info *adv;

        /* Instance 0x00 always set local name */
        if (instance == 0x00)

        adv = hci_find_adv_instance(hdev, instance);

        if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
            adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)

        return adv->scan_rsp_len ? true : false;
/* This function requires the caller holds hdev->lock */
/* Free every registered advertising monitor, then dispose of the IDR
 * that maps monitor handles.
 * NOTE(review): the declaration of the 'handle' iteration variable is
 * not visible in this excerpt.
 */
void hci_adv_monitors_clear(struct hci_dev *hdev)
        struct adv_monitor *monitor;

        idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
                hci_free_adv_monitor(hdev, monitor);

        idr_destroy(&hdev->adv_monitors_idr);
/* Frees the monitor structure and do some bookkeepings.
 * This function requires the caller holds hdev->lock.
 */
/* Releases each pattern, drops the handle from the IDR, and updates the
 * monitor count / mgmt notification for registered monitors.
 * NOTE(review): the kfree() of each pattern and of the monitor itself is
 * not visible in this excerpt.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
        struct adv_pattern *pattern;
        struct adv_pattern *tmp;

        list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
                list_del(&pattern->list);

        /* handle 0 means the monitor was never assigned a handle. */
        if (monitor->handle)
                idr_remove(&hdev->adv_monitors_idr, monitor->handle);

        /* Only registered monitors count toward adv_monitors_cnt and are
         * announced to mgmt as removed.
         */
        if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
                hdev->adv_monitors_cnt--;
                mgmt_adv_monitor_removed(hdev, monitor->handle);
/* Assigns handle to a monitor, and if offloading is supported and power is on,
 * also attempts to forward the request to the controller.
 * This function requires the caller holds hci_req_sync_lock.
 */
/* NOTE(review): the 'status' declaration, hci_dev_lock() around the IDR
 * allocation, the error returns and the final return are not visible in
 * this excerpt.
 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
        int min, max, handle;

        /* Allocate a handle in the reserved monitor handle range. */
        min = HCI_MIN_ADV_MONITOR_HANDLE;
        max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
        handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
        hci_dev_unlock(hdev);

        monitor->handle = handle;

        /* Forwarding to the controller only makes sense when powered. */
        if (!hdev_is_powered(hdev))

        switch (hci_get_adv_monitor_offload_ext(hdev)) {
        case HCI_ADV_MONITOR_EXT_NONE:
                bt_dev_dbg(hdev, "%s add monitor %d status %d", hdev->name,
                           monitor->handle, status);
                /* Message was not forwarded to controller - not an error */
        case HCI_ADV_MONITOR_EXT_MSFT:
                status = msft_add_monitor_pattern(hdev, monitor);
                bt_dev_dbg(hdev, "%s add monitor %d msft status %d", hdev->name,
                           monitor->handle, status);
/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * This function requires the caller holds hci_req_sync_lock.
 */
/* NOTE(review): the 'status' declaration, intermediate gotos/breaks and
 * the final return are not visible in this excerpt.
 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
                                  struct adv_monitor *monitor)
        switch (hci_get_adv_monitor_offload_ext(hdev)) {
        case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
                bt_dev_dbg(hdev, "%s remove monitor %d status %d", hdev->name,
                           monitor->handle, status);
        case HCI_ADV_MONITOR_EXT_MSFT:
                status = msft_remove_monitor(hdev, monitor);
                bt_dev_dbg(hdev, "%s remove monitor %d msft status %d",
                           hdev->name, monitor->handle, status);

        /* In case no matching handle registered, just free the monitor */
        if (status == -ENOENT)

        if (status == -ENOENT)
                bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
        hci_free_adv_monitor(hdev, monitor);
1966 /* This function requires the caller holds hci_req_sync_lock */
1967 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1969 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1974 return hci_remove_adv_monitor(hdev, monitor);
/* This function requires the caller holds hci_req_sync_lock */
/* Remove every registered advertising monitor in handle order.
 * NOTE(review): the surrounding loop, its exit condition and the return
 * of the accumulated status are not visible in this excerpt.
 */
int hci_remove_all_adv_monitor(struct hci_dev *hdev)
        struct adv_monitor *monitor;
        int idr_next_id = 0;

        monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);

        status = hci_remove_adv_monitor(hdev, monitor);
1999 /* This function requires the caller holds hdev->lock */
2000 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2002 return !idr_is_empty(&hdev->adv_monitors_idr);
2005 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2007 if (msft_monitor_supported(hdev))
2008 return HCI_ADV_MONITOR_EXT_MSFT;
2010 return HCI_ADV_MONITOR_EXT_NONE;
2013 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2014 bdaddr_t *bdaddr, u8 type)
2016 struct bdaddr_list *b;
2018 list_for_each_entry(b, bdaddr_list, list) {
2019 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2026 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2027 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2030 struct bdaddr_list_with_irk *b;
2032 list_for_each_entry(b, bdaddr_list, list) {
2033 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2040 struct bdaddr_list_with_flags *
2041 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2042 bdaddr_t *bdaddr, u8 type)
2044 struct bdaddr_list_with_flags *b;
2046 list_for_each_entry(b, bdaddr_list, list) {
2047 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2054 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2056 struct bdaddr_list *b, *n;
2058 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2064 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2066 struct bdaddr_list *entry;
2068 if (!bacmp(bdaddr, BDADDR_ANY))
2071 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2074 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2078 bacpy(&entry->bdaddr, bdaddr);
2079 entry->bdaddr_type = type;
2081 list_add(&entry->list, list);
/* Add @bdaddr/@type plus optional peer/local IRKs to a
 * bdaddr_list_with_irk list.
 * NOTE(review): the error returns and the conditional guards around the
 * two 16-byte IRK copies are not visible in this excerpt -- confirm the
 * memcpy()s only run for non-NULL IRK pointers.
 */
int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
                                 u8 type, u8 *peer_irk, u8 *local_irk)
        struct bdaddr_list_with_irk *entry;

        if (!bacmp(bdaddr, BDADDR_ANY))

        if (hci_bdaddr_list_lookup(list, bdaddr, type))

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);

        bacpy(&entry->bdaddr, bdaddr);
        entry->bdaddr_type = type;

        memcpy(entry->peer_irk, peer_irk, 16);

        memcpy(entry->local_irk, local_irk, 16);

        list_add(&entry->list, list);
2115 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2118 struct bdaddr_list_with_flags *entry;
2120 if (!bacmp(bdaddr, BDADDR_ANY))
2123 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2126 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2130 bacpy(&entry->bdaddr, bdaddr);
2131 entry->bdaddr_type = type;
2132 entry->flags = flags;
2134 list_add(&entry->list, list);
2139 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2141 struct bdaddr_list *entry;
2143 if (!bacmp(bdaddr, BDADDR_ANY)) {
2144 hci_bdaddr_list_clear(list);
2148 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2152 list_del(&entry->list);
2158 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2161 struct bdaddr_list_with_irk *entry;
2163 if (!bacmp(bdaddr, BDADDR_ANY)) {
2164 hci_bdaddr_list_clear(list);
2168 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2172 list_del(&entry->list);
2178 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2181 struct bdaddr_list_with_flags *entry;
2183 if (!bacmp(bdaddr, BDADDR_ANY)) {
2184 hci_bdaddr_list_clear(list);
2188 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2192 list_del(&entry->list);
2198 /* This function requires the caller holds hdev->lock */
2199 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2200 bdaddr_t *addr, u8 addr_type)
2202 struct hci_conn_params *params;
2204 list_for_each_entry(params, &hdev->le_conn_params, list) {
2205 if (bacmp(¶ms->addr, addr) == 0 &&
2206 params->addr_type == addr_type) {
2214 /* This function requires the caller holds hdev->lock */
2215 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2216 bdaddr_t *addr, u8 addr_type)
2218 struct hci_conn_params *param;
2220 list_for_each_entry(param, list, action) {
2221 if (bacmp(¶m->addr, addr) == 0 &&
2222 param->addr_type == addr_type)
2229 /* This function requires the caller holds hdev->lock */
2230 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2231 bdaddr_t *addr, u8 addr_type)
2233 struct hci_conn_params *params;
2235 params = hci_conn_params_lookup(hdev, addr, addr_type);
2239 params = kzalloc(sizeof(*params), GFP_KERNEL);
2241 bt_dev_err(hdev, "out of memory");
2245 bacpy(¶ms->addr, addr);
2246 params->addr_type = addr_type;
2248 list_add(¶ms->list, &hdev->le_conn_params);
2249 INIT_LIST_HEAD(¶ms->action);
2251 params->conn_min_interval = hdev->le_conn_min_interval;
2252 params->conn_max_interval = hdev->le_conn_max_interval;
2253 params->conn_latency = hdev->le_conn_latency;
2254 params->supervision_timeout = hdev->le_supv_timeout;
2255 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2257 BT_DBG("addr %pMR (type %u)", addr, addr_type);
/* Release one connection-parameter entry: drop the connection reference
 * it may pin, then unlink it from the action and main lists.
 * NOTE(review): the guard around the conn drop/put pair and the final
 * kfree(params) are not visible in this excerpt.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
        hci_conn_drop(params->conn);
        hci_conn_put(params->conn);

        list_del(&params->action);
        list_del(&params->list);
2274 /* This function requires the caller holds hdev->lock */
2275 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2277 struct hci_conn_params *params;
2279 params = hci_conn_params_lookup(hdev, addr, addr_type);
2283 hci_conn_params_free(params);
2285 hci_update_passive_scan(hdev);
2287 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2290 /* This function requires the caller holds hdev->lock */
2291 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2293 struct hci_conn_params *params, *tmp;
2295 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2296 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2299 /* If trying to establish one time connection to disabled
2300 * device, leave the params, but mark them as just once.
2302 if (params->explicit_connect) {
2303 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2307 list_del(¶ms->list);
2311 BT_DBG("All LE disabled connection parameters were removed");
2314 /* This function requires the caller holds hdev->lock */
2315 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2317 struct hci_conn_params *params, *tmp;
2319 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2320 hci_conn_params_free(params);
2322 BT_DBG("All LE connection parameters were removed");
2325 /* Copy the Identity Address of the controller.
2327 * If the controller has a public BD_ADDR, then by default use that one.
2328 * If this is a LE only controller without a public address, default to
2329 * the static random address.
2331 * For debugging purposes it is possible to force controllers with a
2332 * public address to use the static random address instead.
2334 * In case BR/EDR has been disabled on a dual-mode controller and
2335 * userspace has configured a static address, then that address
2336 * becomes the identity address instead of the public BR/EDR address.
2338 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2341 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2342 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2343 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2344 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2345 bacpy(bdaddr, &hdev->static_addr);
2346 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2348 bacpy(bdaddr, &hdev->bdaddr);
2349 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
/* Reset the stored wake reason/address after a suspend/resume cycle.
 * NOTE(review): the matching hci_dev_lock(hdev) at the top of this
 * function is not visible in this excerpt -- the unlock below implies
 * it.
 */
static void hci_clear_wake_reason(struct hci_dev *hdev)
        hdev->wake_reason = 0;
        bacpy(&hdev->wake_addr, BDADDR_ANY);
        hdev->wake_addr_type = 0;

        hci_dev_unlock(hdev);
/* PM notifier callback: route system suspend/resume transitions to the
 * corresponding HCI suspend/resume handlers.
 * NOTE(review): the 'ret' declaration, the error-check guarding the
 * bt_dev_err() and the NOTIFY_* return value are not visible in this
 * excerpt.
 */
static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
        struct hci_dev *hdev =
                container_of(nb, struct hci_dev, suspend_notifier);

        if (action == PM_SUSPEND_PREPARE)
                ret = hci_suspend_dev(hdev);
        else if (action == PM_POST_SUSPEND)
                ret = hci_resume_dev(hdev);

        bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
/* Alloc HCI device */
/* Allocate and initialise a struct hci_dev with @sizeof_priv extra bytes
 * of driver-private storage appended, filling in the stack-wide default
 * parameters, lists, work items and queues.
 * NOTE(review): the NULL check after kzalloc() and the final return are
 * not visible in this excerpt.
 */
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
        struct hci_dev *hdev;
        unsigned int alloc_size;

        alloc_size = sizeof(*hdev);
        /* Fixme: May need ALIGN-ment? */
        alloc_size += sizeof_priv;

        hdev = kzalloc(alloc_size, GFP_KERNEL);

        /* Baseline BR/EDR capabilities and identity defaults. */
        hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->num_iac = 0x01;           /* One IAC support is mandatory */
        hdev->io_capability = 0x03;     /* No Input No Output */
        hdev->manufacturer = 0xffff;    /* Default to internal use */
        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_instance_cnt = 0;
        hdev->cur_adv_instance = 0x00;
        hdev->adv_instance_timeout = 0;

        hdev->advmon_allowlist_duration = 300;
        hdev->advmon_no_filter_duration = 500;
        hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */

        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        /* LE defaults (intervals/windows in 0.625 ms or 1.25 ms units as
         * defined by the HCI specification for each parameter).
         */
        hdev->le_adv_channel_map = 0x07;
        hdev->le_adv_min_interval = 0x0800;
        hdev->le_adv_max_interval = 0x0800;
        hdev->le_scan_interval = 0x0060;
        hdev->le_scan_window = 0x0030;
        hdev->le_scan_int_suspend = 0x0400;
        hdev->le_scan_window_suspend = 0x0012;
        hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
        hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
        hdev->le_scan_int_adv_monitor = 0x0060;
        hdev->le_scan_window_adv_monitor = 0x0030;
        hdev->le_scan_int_connect = 0x0060;
        hdev->le_scan_window_connect = 0x0060;
        hdev->le_conn_min_interval = 0x0018;
        hdev->le_conn_max_interval = 0x0028;
        hdev->le_conn_latency = 0x0000;
        hdev->le_supv_timeout = 0x002a;
        hdev->le_def_tx_len = 0x001b;
        hdev->le_def_tx_time = 0x0148;
        hdev->le_max_tx_len = 0x001b;
        hdev->le_max_tx_time = 0x0148;
        hdev->le_max_rx_len = 0x001b;
        hdev->le_max_rx_time = 0x0148;
        hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
        hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
        hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
        hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
        hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
        hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
        hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
        hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
        hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

        hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
        hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
        hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
        hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
        hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
        hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

        /* default 1.28 sec page scan */
        hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
        hdev->def_page_scan_int = 0x0800;
        hdev->def_page_scan_window = 0x0012;

        mutex_init(&hdev->lock);
        mutex_init(&hdev->req_lock);

        /* All bookkeeping lists start empty. */
        INIT_LIST_HEAD(&hdev->mgmt_pending);
        INIT_LIST_HEAD(&hdev->reject_list);
        INIT_LIST_HEAD(&hdev->accept_list);
        INIT_LIST_HEAD(&hdev->uuids);
        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);
        INIT_LIST_HEAD(&hdev->identity_resolving_keys);
        INIT_LIST_HEAD(&hdev->remote_oob_data);
        INIT_LIST_HEAD(&hdev->le_accept_list);
        INIT_LIST_HEAD(&hdev->le_resolv_list);
        INIT_LIST_HEAD(&hdev->le_conn_params);
        INIT_LIST_HEAD(&hdev->pend_le_conns);
        INIT_LIST_HEAD(&hdev->pend_le_reports);
        INIT_LIST_HEAD(&hdev->conn_hash.list);
        INIT_LIST_HEAD(&hdev->adv_instances);
        INIT_LIST_HEAD(&hdev->blocked_keys);
        INIT_LIST_HEAD(&hdev->monitored_devices);

        INIT_LIST_HEAD(&hdev->local_codecs);
        /* Work items driving the RX/TX/command pipelines. */
        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);
        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_WORK(&hdev->error_reset, hci_error_reset);

        hci_cmd_sync_init(hdev);

        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        init_waitqueue_head(&hdev->req_wait_q);

        INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
        INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);

        hci_request_setup(hdev);

        hci_init_sysfs(hdev);
        discovery_init(hdev);
EXPORT_SYMBOL(hci_alloc_dev_priv);
/* Free HCI device */
/* Drop the device reference; the actual memory is released by the
 * device core's release callback.
 */
void hci_free_dev(struct hci_dev *hdev)
        /* will free via device release */
        put_device(&hdev->dev);
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
/* Validate the driver callbacks, allocate an index, create workqueues,
 * sysfs/debugfs/rfkill plumbing and announce the new controller.
 * NOTE(review): the local declarations, several error returns/gotos and
 * intermediate checks are not visible in this excerpt.
 */
int hci_register_dev(struct hci_dev *hdev)
        /* A usable driver must provide these three hooks. */
        if (!hdev->open || !hdev->close || !hdev->send)

        /* Do not allow HCI_AMP devices to register at index 0,
         * so the index can be used as the AMP controller ID.
        switch (hdev->dev_type) {
        id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
        id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);

        snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
        if (!hdev->workqueue) {

        hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
        if (!hdev->req_workqueue) {
                destroy_workqueue(hdev->workqueue);

        if (!IS_ERR_OR_NULL(bt_debugfs))
                hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

        dev_set_name(&hdev->dev, "%s", hdev->name);

        error = device_add(&hdev->dev);

        hci_leds_init(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
        /* rfkill registration failure is non-fatal: run without rfkill. */
        if (rfkill_register(hdev->rfkill) < 0) {
                rfkill_destroy(hdev->rfkill);
                hdev->rfkill = NULL;

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
                hci_dev_set_flag(hdev, HCI_RFKILLED);

        hci_dev_set_flag(hdev, HCI_SETUP);
        hci_dev_set_flag(hdev, HCI_AUTO_OFF);

        if (hdev->dev_type == HCI_PRIMARY) {
                /* Assume BR/EDR support until proven otherwise (such as
                 * through reading supported features during init.
                hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

        write_lock(&hci_dev_list_lock);
        list_add(&hdev->list, &hci_dev_list);
        write_unlock(&hci_dev_list_lock);

        /* Devices that are marked for raw-only usage are unconfigured
         * and should not be included in normal operation.
        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

        /* Mark Remote Wakeup connection flag as supported if driver has wakeup
        hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;

        hci_sock_dev_event(hdev, HCI_DEV_REG);

        error = hci_register_suspend_notifier(hdev);

        queue_work(hdev->req_workqueue, &hdev->power_on);

        idr_init(&hdev->adv_monitors_idr);
        msft_register(hdev);

        /* Error unwind path. */
        debugfs_remove_recursive(hdev->debugfs);
        destroy_workqueue(hdev->workqueue);
        destroy_workqueue(hdev->req_workqueue);
        ida_simple_remove(&hci_index_ida, hdev->id);
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
/* Detach the controller from the stack: remove it from the global list,
 * stop pending work, close it and tear down mgmt/rfkill/sysfs state.
 * Final cleanup happens later in hci_release_dev().
 * NOTE(review): the hci_dev_lock() before mgmt_index_removed() and the
 * rfkill NULL check are not visible in this excerpt.
 */
void hci_unregister_dev(struct hci_dev *hdev)
        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        hci_dev_set_flag(hdev, HCI_UNREGISTER);

        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);

        cancel_work_sync(&hdev->power_on);

        hci_cmd_sync_clear(hdev);

        hci_unregister_suspend_notifier(hdev);

        msft_unregister(hdev);

        hci_dev_do_close(hdev);

        /* Only announce index removal if userspace was ever told about
         * this controller (i.e. not still in SETUP/CONFIG).
         */
        if (!test_bit(HCI_INIT, &hdev->flags) &&
            !hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                mgmt_index_removed(hdev);
                hci_dev_unlock(hdev);

        /* mgmt_index_removed should take care of emptying the
        BUG_ON(!list_empty(&hdev->mgmt_pending));

        hci_sock_dev_event(hdev, HCI_DEV_UNREG);

        rfkill_unregister(hdev->rfkill);
        rfkill_destroy(hdev->rfkill);

        device_del(&hdev->dev);
        /* Actual cleanup is deferred until hci_release_dev(). */
EXPORT_SYMBOL(hci_unregister_dev);
/* Release HCI device */
/* Final teardown invoked from the device release path: free debugfs,
 * workqueues, all cached key/address/parameter lists, the index and the
 * last pending command buffer.
 * NOTE(review): the hci_dev_lock() preceding the list-clear section and
 * the final kfree(hdev) are not visible in this excerpt.
 */
void hci_release_dev(struct hci_dev *hdev)
        debugfs_remove_recursive(hdev->debugfs);
        kfree_const(hdev->hw_info);
        kfree_const(hdev->fw_info);

        destroy_workqueue(hdev->workqueue);
        destroy_workqueue(hdev->req_workqueue);

        /* Drop every cached list under hdev->lock. */
        hci_bdaddr_list_clear(&hdev->reject_list);
        hci_bdaddr_list_clear(&hdev->accept_list);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_smp_ltks_clear(hdev);
        hci_smp_irks_clear(hdev);
        hci_remote_oob_data_clear(hdev);
        hci_adv_instances_clear(hdev);
        hci_adv_monitors_clear(hdev);
        hci_bdaddr_list_clear(&hdev->le_accept_list);
        hci_bdaddr_list_clear(&hdev->le_resolv_list);
        hci_conn_params_clear_all(hdev);
        hci_discovery_filter_clear(hdev);
        hci_blocked_keys_clear(hdev);
        hci_dev_unlock(hdev);

        ida_simple_remove(&hci_index_ida, hdev->id);
        kfree_skb(hdev->sent_cmd);
EXPORT_SYMBOL(hci_release_dev);
/* Register the PM suspend notifier unless the driver opted out via
 * HCI_QUIRK_NO_SUSPEND_NOTIFIER.
 * NOTE(review): the 'ret' declaration and the return are not visible in
 * this excerpt.
 */
int hci_register_suspend_notifier(struct hci_dev *hdev)
        if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
                hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
                ret = register_pm_notifier(&hdev->suspend_notifier);
/* Counterpart of hci_register_suspend_notifier(): unregister the PM
 * notifier when one was installed.
 * NOTE(review): the 'ret' declaration and the return are not visible in
 * this excerpt.
 */
int hci_unregister_suspend_notifier(struct hci_dev *hdev)
        if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
                ret = unregister_pm_notifier(&hdev->suspend_notifier);
/* Suspend HCI device */
/* Run the synchronous suspend sequence under hci_req_sync_lock, clear
 * the stored wake reason and notify mgmt and monitor sockets.
 * NOTE(review): the early 'return 0' statements after the powered/
 * powering-down checks and the final return of 'ret' are not visible in
 * this excerpt.
 */
int hci_suspend_dev(struct hci_dev *hdev)
        bt_dev_dbg(hdev, "");

        /* Suspend should only act on when powered. */
        if (!hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))

        /* If powering down don't attempt to suspend */
        if (mgmt_powering_down(hdev))

        hci_req_sync_lock(hdev);
        ret = hci_suspend_sync(hdev);
        hci_req_sync_unlock(hdev);

        hci_clear_wake_reason(hdev);
        mgmt_suspending(hdev, hdev->suspend_state);

        hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
EXPORT_SYMBOL(hci_suspend_dev);
/* Resume HCI device */
/* Run the synchronous resume sequence under hci_req_sync_lock and
 * report the wake reason/address to mgmt and monitor sockets.
 * NOTE(review): the early 'return 0' statements after the powered/
 * powering-down checks and the final return of 'ret' are not visible in
 * this excerpt.
 */
int hci_resume_dev(struct hci_dev *hdev)
        bt_dev_dbg(hdev, "");

        /* Resume should only act on when powered. */
        if (!hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))

        /* If powering down don't attempt to resume */
        if (mgmt_powering_down(hdev))

        hci_req_sync_lock(hdev);
        ret = hci_resume_sync(hdev);
        hci_req_sync_unlock(hdev);

        mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
                      hdev->wake_addr_type);

        hci_sock_dev_event(hdev, HCI_DEV_RESUME);
EXPORT_SYMBOL(hci_resume_dev);
/* Reset HCI device */
/* Inject a synthetic HCI Hardware Error event (code 0x01) into the RX
 * path so the upper stack performs a full reset of the controller.
 * NOTE(review): the NULL check after bt_skb_alloc() is not visible in
 * this excerpt.
 */
int hci_reset_dev(struct hci_dev *hdev)
        static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
        struct sk_buff *skb;

        skb = bt_skb_alloc(3, GFP_ATOMIC);

        hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
        skb_put_data(skb, hw_err, 3);

        bt_dev_err(hdev, "Injecting HCI hardware error event");

        /* Send Hardware Error to upper stack */
        return hci_recv_frame(hdev, skb);
EXPORT_SYMBOL(hci_reset_dev);
/* Receive frame from HCI drivers */
/* Validate an inbound frame (device up or initialising, known packet
 * type), timestamp it and queue it for the RX worker.
 * NOTE(review): the kfree_skb()/error returns of the two reject paths
 * and the final 'return 0' are not visible in this excerpt.
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                      && !test_bit(HCI_INIT, &hdev->flags))) {

        /* Only the four defined inbound packet types are accepted. */
        if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
            hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
            hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
            hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {

        bt_cb(skb)->incoming = 1;

        __net_timestamp(skb);

        skb_queue_tail(&hdev->rx_q, skb);
        queue_work(hdev->workqueue, &hdev->rx_work);
EXPORT_SYMBOL(hci_recv_frame);
/* Receive diagnostic message from HCI drivers */
/* Tag the skb as a diagnostic packet, timestamp it and hand it to the
 * RX worker; no validation is performed on diagnostic frames.
 * NOTE(review): the final 'return 0' is not visible in this excerpt.
 */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
        /* Mark as diagnostic packet */
        hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

        __net_timestamp(skb);

        skb_queue_tail(&hdev->rx_q, skb);
        queue_work(hdev->workqueue, &hdev->rx_work);
EXPORT_SYMBOL(hci_recv_diag);
/* Store a printf-formatted hardware-info string on @hdev, replacing any
 * previous value (kvasprintf_const may return the literal itself, hence
 * the kfree_const pairing).
 * NOTE(review): the va_list declaration and the matching va_end() are
 * not visible in this excerpt.
 */
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
        va_start(vargs, fmt);
        kfree_const(hdev->hw_info);
        hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
EXPORT_SYMBOL(hci_set_hw_info);
/* Store a printf-formatted firmware-info string on @hdev; mirrors
 * hci_set_hw_info().
 * NOTE(review): the va_list declaration and the matching va_end() are
 * not visible in this excerpt.
 */
void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
        va_start(vargs, fmt);
        kfree_const(hdev->fw_info);
        hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
EXPORT_SYMBOL(hci_set_fw_info);
2885 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback structure on the global
 * hci_cb_list, serialised by hci_cb_list_lock.
 * NOTE(review): the 'return 0' is not visible in this excerpt.
 */
int hci_register_cb(struct hci_cb *cb)
        BT_DBG("%p name %s", cb, cb->name);

        mutex_lock(&hci_cb_list_lock);
        list_add_tail(&cb->list, &hci_cb_list);
        mutex_unlock(&hci_cb_list_lock);
EXPORT_SYMBOL(hci_register_cb);
/* Remove an upper-protocol callback structure from the global
 * hci_cb_list, serialised by hci_cb_list_lock.
 * NOTE(review): the 'return 0' is not visible in this excerpt.
 */
int hci_unregister_cb(struct hci_cb *cb)
        BT_DBG("%p name %s", cb, cb->name);

        mutex_lock(&hci_cb_list_lock);
        list_del(&cb->list);
        mutex_unlock(&hci_cb_list_lock);
EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outbound frame to the driver, after mirroring it to the
 * monitor socket and (in promiscuous mode) to raw sockets.
 * NOTE(review): the skb_orphan() after the ownership comment, the error
 * handling after a failed send and the return value are not visible in
 * this excerpt.
 */
static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),

        __net_timestamp(skb);

        /* Send copy to monitor */
        hci_send_to_monitor(hdev, skb);

        if (atomic_read(&hdev->promisc)) {
                /* Send copy to the sockets */
                hci_send_to_sock(hdev, skb);

        /* Get rid of skb owner, prior to sending to the driver. */

        /* Frames must not reach the driver once it stopped running. */
        if (!test_bit(HCI_RUNNING, &hdev->flags)) {

        err = hdev->send(hdev, skb);
                bt_dev_err(hdev, "sending frame failed (%d)", err);
/* Send HCI command */
/* Build a command skb for @opcode/@param, flag it as the start of a
 * stand-alone request and queue it on cmd_q for the command worker.
 * NOTE(review): the -ENOMEM return on allocation failure and the final
 * 'return 0' are not visible in this excerpt.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
                bt_dev_err(hdev, "no memory for command");

        /* Stand-alone HCI commands must be flagged as
         * single-command requests.
        bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        skb_queue_tail(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
/* Send a command directly to the driver, bypassing the command queue
 * and expecting no completion event.  Restricted to vendor (OGF 0x3f)
 * opcodes, since standard commands always produce a Command Status or
 * Command Complete event.
 * NOTE(review): the error returns (-EINVAL, -ENOMEM) and the final
 * 'return 0' are not visible in this excerpt.
 */
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
        struct sk_buff *skb;

        if (hci_opcode_ogf(opcode) != 0x3f) {
                /* A controller receiving a command shall respond with either
                 * a Command Status Event or a Command Complete Event.
                 * Therefore, all standard HCI commands must be sent via the
                 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
                 * Some vendors do not comply with this rule for vendor-specific
                 * commands and do not return any event. We want to support
                 * unresponded commands for such cases only.
                bt_dev_err(hdev, "unresponded command not supported");

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",

        hci_send_frame(hdev, skb);
EXPORT_SYMBOL(__hci_cmd_send);
3003 /* Get data from the previously sent command */
3004 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3006 struct hci_command_hdr *hdr;
3008 if (!hdev->sent_cmd)
3011 hdr = (void *) hdev->sent_cmd->data;
3013 if (hdr->opcode != cpu_to_le16(opcode))
3016 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3018 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3022 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3024 struct hci_acl_hdr *hdr;
3027 skb_push(skb, HCI_ACL_HDR_SIZE);
3028 skb_reset_transport_header(skb);
3029 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3030 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3031 hdr->dlen = cpu_to_le16(len);
3034 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3035 struct sk_buff *skb, __u16 flags)
3037 struct hci_conn *conn = chan->conn;
3038 struct hci_dev *hdev = conn->hdev;
3039 struct sk_buff *list;
3041 skb->len = skb_headlen(skb);
3044 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3046 switch (hdev->dev_type) {
3048 hci_add_acl_hdr(skb, conn->handle, flags);
3051 hci_add_acl_hdr(skb, chan->handle, flags);
3054 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3058 list = skb_shinfo(skb)->frag_list;
3060 /* Non fragmented */
3061 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3063 skb_queue_tail(queue, skb);
3066 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3068 skb_shinfo(skb)->frag_list = NULL;
3070 /* Queue all fragments atomically. We need to use spin_lock_bh
3071 * here because of 6LoWPAN links, as there this function is
3072 * called from softirq and using normal spin lock could cause
3075 spin_lock_bh(&queue->lock);
3077 __skb_queue_tail(queue, skb);
3079 flags &= ~ACL_START;
3082 skb = list; list = list->next;
3084 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3085 hci_add_acl_hdr(skb, conn->handle, flags);
3087 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3089 __skb_queue_tail(queue, skb);
3092 spin_unlock_bh(&queue->lock);
3096 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3098 struct hci_dev *hdev = chan->conn->hdev;
3100 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3102 hci_queue_acl(chan, &chan->data_q, skb, flags);
3104 queue_work(hdev->workqueue, &hdev->tx_work);
3108 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3110 struct hci_dev *hdev = conn->hdev;
3111 struct hci_sco_hdr hdr;
3113 BT_DBG("%s len %d", hdev->name, skb->len);
3115 hdr.handle = cpu_to_le16(conn->handle);
3116 hdr.dlen = skb->len;
3118 skb_push(skb, HCI_SCO_HDR_SIZE);
3119 skb_reset_transport_header(skb);
3120 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3122 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3124 skb_queue_tail(&conn->data_q, skb);
3125 queue_work(hdev->workqueue, &hdev->tx_work);
3128 /* ---- HCI TX task (outgoing data) ---- */
3130 /* HCI Connection scheduler */
3131 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3134 struct hci_conn_hash *h = &hdev->conn_hash;
3135 struct hci_conn *conn = NULL, *c;
3136 unsigned int num = 0, min = ~0;
3138 /* We don't have to lock device here. Connections are always
3139 * added and removed with TX task disabled. */
3143 list_for_each_entry_rcu(c, &h->list, list) {
3144 if (c->type != type || skb_queue_empty(&c->data_q))
3147 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3152 if (c->sent < min) {
3157 if (hci_conn_num(hdev, type) == num)
3166 switch (conn->type) {
3168 cnt = hdev->acl_cnt;
3172 cnt = hdev->sco_cnt;
3175 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3179 bt_dev_err(hdev, "unknown link type %d", conn->type);
3187 BT_DBG("conn %p quote %d", conn, *quote);
3191 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3193 struct hci_conn_hash *h = &hdev->conn_hash;
3196 bt_dev_err(hdev, "link tx timeout");
3200 /* Kill stalled connections */
3201 list_for_each_entry_rcu(c, &h->list, list) {
3202 if (c->type == type && c->sent) {
3203 bt_dev_err(hdev, "killing stalled connection %pMR",
3205 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3212 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3215 struct hci_conn_hash *h = &hdev->conn_hash;
3216 struct hci_chan *chan = NULL;
3217 unsigned int num = 0, min = ~0, cur_prio = 0;
3218 struct hci_conn *conn;
3219 int cnt, q, conn_num = 0;
3221 BT_DBG("%s", hdev->name);
3225 list_for_each_entry_rcu(conn, &h->list, list) {
3226 struct hci_chan *tmp;
3228 if (conn->type != type)
3231 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3236 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3237 struct sk_buff *skb;
3239 if (skb_queue_empty(&tmp->data_q))
3242 skb = skb_peek(&tmp->data_q);
3243 if (skb->priority < cur_prio)
3246 if (skb->priority > cur_prio) {
3249 cur_prio = skb->priority;
3254 if (conn->sent < min) {
3260 if (hci_conn_num(hdev, type) == conn_num)
3269 switch (chan->conn->type) {
3271 cnt = hdev->acl_cnt;
3274 cnt = hdev->block_cnt;
3278 cnt = hdev->sco_cnt;
3281 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3285 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3290 BT_DBG("chan %p quote %d", chan, *quote);
3294 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3296 struct hci_conn_hash *h = &hdev->conn_hash;
3297 struct hci_conn *conn;
3300 BT_DBG("%s", hdev->name);
3304 list_for_each_entry_rcu(conn, &h->list, list) {
3305 struct hci_chan *chan;
3307 if (conn->type != type)
3310 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3315 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3316 struct sk_buff *skb;
3323 if (skb_queue_empty(&chan->data_q))
3326 skb = skb_peek(&chan->data_q);
3327 if (skb->priority >= HCI_PRIO_MAX - 1)
3330 skb->priority = HCI_PRIO_MAX - 1;
3332 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3336 if (hci_conn_num(hdev, type) == num)
3344 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3346 /* Calculate count of blocks used by this packet */
3347 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3350 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3352 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3353 /* ACL tx timeout must be longer than maximum
3354 * link supervision timeout (40.9 seconds) */
3355 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3356 HCI_ACL_TX_TIMEOUT))
3357 hci_link_tx_to(hdev, ACL_LINK);
3362 static void hci_sched_sco(struct hci_dev *hdev)
3364 struct hci_conn *conn;
3365 struct sk_buff *skb;
3368 BT_DBG("%s", hdev->name);
3370 if (!hci_conn_num(hdev, SCO_LINK))
3373 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3374 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3375 BT_DBG("skb %p len %d", skb, skb->len);
3376 hci_send_frame(hdev, skb);
3379 if (conn->sent == ~0)
3385 static void hci_sched_esco(struct hci_dev *hdev)
3387 struct hci_conn *conn;
3388 struct sk_buff *skb;
3391 BT_DBG("%s", hdev->name);
3393 if (!hci_conn_num(hdev, ESCO_LINK))
3396 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3398 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3399 BT_DBG("skb %p len %d", skb, skb->len);
3400 hci_send_frame(hdev, skb);
3403 if (conn->sent == ~0)
3409 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3411 unsigned int cnt = hdev->acl_cnt;
3412 struct hci_chan *chan;
3413 struct sk_buff *skb;
3416 __check_timeout(hdev, cnt);
3418 while (hdev->acl_cnt &&
3419 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
3420 u32 priority = (skb_peek(&chan->data_q))->priority;
3421 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3422 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3423 skb->len, skb->priority);
3425 /* Stop if priority has changed */
3426 if (skb->priority < priority)
3429 skb = skb_dequeue(&chan->data_q);
3431 hci_conn_enter_active_mode(chan->conn,
3432 bt_cb(skb)->force_active);
3434 hci_send_frame(hdev, skb);
3435 hdev->acl_last_tx = jiffies;
3441 /* Send pending SCO packets right away */
3442 hci_sched_sco(hdev);
3443 hci_sched_esco(hdev);
3447 if (cnt != hdev->acl_cnt)
3448 hci_prio_recalculate(hdev, ACL_LINK);
3451 static void hci_sched_acl_blk(struct hci_dev *hdev)
3453 unsigned int cnt = hdev->block_cnt;
3454 struct hci_chan *chan;
3455 struct sk_buff *skb;
3459 __check_timeout(hdev, cnt);
3461 BT_DBG("%s", hdev->name);
3463 if (hdev->dev_type == HCI_AMP)
3468 while (hdev->block_cnt > 0 &&
3469 (chan = hci_chan_sent(hdev, type, "e))) {
3470 u32 priority = (skb_peek(&chan->data_q))->priority;
3471 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3474 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3475 skb->len, skb->priority);
3477 /* Stop if priority has changed */
3478 if (skb->priority < priority)
3481 skb = skb_dequeue(&chan->data_q);
3483 blocks = __get_blocks(hdev, skb);
3484 if (blocks > hdev->block_cnt)
3487 hci_conn_enter_active_mode(chan->conn,
3488 bt_cb(skb)->force_active);
3490 hci_send_frame(hdev, skb);
3491 hdev->acl_last_tx = jiffies;
3493 hdev->block_cnt -= blocks;
3496 chan->sent += blocks;
3497 chan->conn->sent += blocks;
3501 if (cnt != hdev->block_cnt)
3502 hci_prio_recalculate(hdev, type);
3505 static void hci_sched_acl(struct hci_dev *hdev)
3507 BT_DBG("%s", hdev->name);
3509 /* No ACL link over BR/EDR controller */
3510 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3513 /* No AMP link over AMP controller */
3514 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3517 switch (hdev->flow_ctl_mode) {
3518 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3519 hci_sched_acl_pkt(hdev);
3522 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3523 hci_sched_acl_blk(hdev);
3528 static void hci_sched_le(struct hci_dev *hdev)
3530 struct hci_chan *chan;
3531 struct sk_buff *skb;
3532 int quote, cnt, tmp;
3534 BT_DBG("%s", hdev->name);
3536 if (!hci_conn_num(hdev, LE_LINK))
3539 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3541 __check_timeout(hdev, cnt);
3544 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
3545 u32 priority = (skb_peek(&chan->data_q))->priority;
3546 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3547 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3548 skb->len, skb->priority);
3550 /* Stop if priority has changed */
3551 if (skb->priority < priority)
3554 skb = skb_dequeue(&chan->data_q);
3556 hci_send_frame(hdev, skb);
3557 hdev->le_last_tx = jiffies;
3563 /* Send pending SCO packets right away */
3564 hci_sched_sco(hdev);
3565 hci_sched_esco(hdev);
3572 hdev->acl_cnt = cnt;
3575 hci_prio_recalculate(hdev, LE_LINK);
3578 static void hci_tx_work(struct work_struct *work)
3580 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3581 struct sk_buff *skb;
3583 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3584 hdev->sco_cnt, hdev->le_cnt);
3586 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3587 /* Schedule queues and send stuff to HCI driver */
3588 hci_sched_sco(hdev);
3589 hci_sched_esco(hdev);
3590 hci_sched_acl(hdev);
3594 /* Send next queued raw (unknown type) packet */
3595 while ((skb = skb_dequeue(&hdev->raw_q)))
3596 hci_send_frame(hdev, skb);
3599 /* ----- HCI RX task (incoming data processing) ----- */
3601 /* ACL data packet */
3602 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3604 struct hci_acl_hdr *hdr = (void *) skb->data;
3605 struct hci_conn *conn;
3606 __u16 handle, flags;
3608 skb_pull(skb, HCI_ACL_HDR_SIZE);
3610 handle = __le16_to_cpu(hdr->handle);
3611 flags = hci_flags(handle);
3612 handle = hci_handle(handle);
3614 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3617 hdev->stat.acl_rx++;
3620 conn = hci_conn_hash_lookup_handle(hdev, handle);
3621 hci_dev_unlock(hdev);
3624 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3626 /* Send to upper protocol */
3627 l2cap_recv_acldata(conn, skb, flags);
3630 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3637 /* SCO data packet */
3638 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3640 struct hci_sco_hdr *hdr = (void *) skb->data;
3641 struct hci_conn *conn;
3642 __u16 handle, flags;
3644 skb_pull(skb, HCI_SCO_HDR_SIZE);
3646 handle = __le16_to_cpu(hdr->handle);
3647 flags = hci_flags(handle);
3648 handle = hci_handle(handle);
3650 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3653 hdev->stat.sco_rx++;
3656 conn = hci_conn_hash_lookup_handle(hdev, handle);
3657 hci_dev_unlock(hdev);
3660 /* Send to upper protocol */
3661 bt_cb(skb)->sco.pkt_status = flags & 0x03;
3662 sco_recv_scodata(conn, skb);
3665 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3672 static bool hci_req_is_complete(struct hci_dev *hdev)
3674 struct sk_buff *skb;
3676 skb = skb_peek(&hdev->cmd_q);
3680 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3683 static void hci_resend_last(struct hci_dev *hdev)
3685 struct hci_command_hdr *sent;
3686 struct sk_buff *skb;
3689 if (!hdev->sent_cmd)
3692 sent = (void *) hdev->sent_cmd->data;
3693 opcode = __le16_to_cpu(sent->opcode);
3694 if (opcode == HCI_OP_RESET)
3697 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3701 skb_queue_head(&hdev->cmd_q, skb);
3702 queue_work(hdev->workqueue, &hdev->cmd_work);
3705 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3706 hci_req_complete_t *req_complete,
3707 hci_req_complete_skb_t *req_complete_skb)
3709 struct sk_buff *skb;
3710 unsigned long flags;
3712 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3714 /* If the completed command doesn't match the last one that was
3715 * sent we need to do special handling of it.
3717 if (!hci_sent_cmd_data(hdev, opcode)) {
3718 /* Some CSR based controllers generate a spontaneous
3719 * reset complete event during init and any pending
3720 * command will never be completed. In such a case we
3721 * need to resend whatever was the last sent
3724 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3725 hci_resend_last(hdev);
3730 /* If we reach this point this event matches the last command sent */
3731 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3733 /* If the command succeeded and there's still more commands in
3734 * this request the request is not yet complete.
3736 if (!status && !hci_req_is_complete(hdev))
3739 /* If this was the last command in a request the complete
3740 * callback would be found in hdev->sent_cmd instead of the
3741 * command queue (hdev->cmd_q).
3743 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
3744 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
3748 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
3749 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
3753 /* Remove all pending commands belonging to this request */
3754 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3755 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3756 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3757 __skb_queue_head(&hdev->cmd_q, skb);
3761 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3762 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3764 *req_complete = bt_cb(skb)->hci.req_complete;
3767 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3770 static void hci_rx_work(struct work_struct *work)
3772 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3773 struct sk_buff *skb;
3775 BT_DBG("%s", hdev->name);
3777 /* The kcov_remote functions used for collecting packet parsing
3778 * coverage information from this background thread and associate
3779 * the coverage with the syscall's thread which originally injected
3780 * the packet. This helps fuzzing the kernel.
3782 for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
3783 kcov_remote_start_common(skb_get_kcov_handle(skb));
3785 /* Send copy to monitor */
3786 hci_send_to_monitor(hdev, skb);
3788 if (atomic_read(&hdev->promisc)) {
3789 /* Send copy to the sockets */
3790 hci_send_to_sock(hdev, skb);
3793 /* If the device has been opened in HCI_USER_CHANNEL,
3794 * the userspace has exclusive access to device.
3795 * When device is HCI_INIT, we still need to process
3796 * the data packets to the driver in order
3797 * to complete its setup().
3799 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
3800 !test_bit(HCI_INIT, &hdev->flags)) {
3805 if (test_bit(HCI_INIT, &hdev->flags)) {
3806 /* Don't process data packets in this states. */
3807 switch (hci_skb_pkt_type(skb)) {
3808 case HCI_ACLDATA_PKT:
3809 case HCI_SCODATA_PKT:
3810 case HCI_ISODATA_PKT:
3817 switch (hci_skb_pkt_type(skb)) {
3819 BT_DBG("%s Event packet", hdev->name);
3820 hci_event_packet(hdev, skb);
3823 case HCI_ACLDATA_PKT:
3824 BT_DBG("%s ACL data packet", hdev->name);
3825 hci_acldata_packet(hdev, skb);
3828 case HCI_SCODATA_PKT:
3829 BT_DBG("%s SCO data packet", hdev->name);
3830 hci_scodata_packet(hdev, skb);
3840 static void hci_cmd_work(struct work_struct *work)
3842 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3843 struct sk_buff *skb;
3845 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3846 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3848 /* Send queued commands */
3849 if (atomic_read(&hdev->cmd_cnt)) {
3850 skb = skb_dequeue(&hdev->cmd_q);
3854 kfree_skb(hdev->sent_cmd);
3856 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
3857 if (hdev->sent_cmd) {
3859 if (hci_req_status_pend(hdev))
3860 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
3861 atomic_dec(&hdev->cmd_cnt);
3863 res = hci_send_frame(hdev, skb);
3865 __hci_cmd_sync_cancel(hdev, -res);
3867 if (test_bit(HCI_RESET, &hdev->flags) ||
3868 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3869 cancel_delayed_work(&hdev->cmd_timer);
3871 schedule_delayed_work(&hdev->cmd_timer,
3874 skb_queue_head(&hdev->cmd_q, skb);
3875 queue_work(hdev->workqueue, &hdev->cmd_work);