2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
/* Global list of registered HCI devices, guarded by hci_dev_list_lock. */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
48 /* HCI ID Numbering */
/* IDA allocator handing out the hciN index for each registered device. */
49 static DEFINE_IDA(hci_index_ida);
51 /* ---- HCI notifications ---- */
/* Forward a device state event to the HCI socket layer. */
53 static void hci_notify(struct hci_dev *hdev, int event)
55 hci_sock_dev_event(hdev, event);
58 /* ---- HCI requests ---- */
/* Command-complete hook: during HCI_INIT, a completion that does not match
 * the last init command may be a spurious event (see CSR note below), in
 * which case the last sent command is re-queued; otherwise any synchronous
 * request waiter is woken with @result.
 * NOTE(review): this dump dropped some lines (braces, a return, the skb
 * NULL check) — restore against the upstream file before building. */
60 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
64 /* If this is the init phase check if the completed command matches
65 * the last init command, and if not just return.
67 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
69 u16 opcode = __le16_to_cpu(sent->opcode);
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
79 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
82 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
84 skb_queue_head(&hdev->cmd_q, skb);
85 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Wake up a waiter blocked in __hci_request(), handing it the result. */
91 if (hdev->req_status == HCI_REQ_PEND) {
92 hdev->req_result = result;
93 hdev->req_status = HCI_REQ_DONE;
94 wake_up_interruptible(&hdev->req_wait_q);
/* Abort a pending synchronous request, waking the waiter with @err. */
98 static void hci_req_cancel(struct hci_dev *hdev, int err)
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
109 /* Execute request and wait for completion. */
/* Run @req and sleep interruptibly until hci_req_complete()/hci_req_cancel()
 * wakes us, the @timeout (in jiffies) expires, or a signal arrives.
 * Returns 0 on success or a negative errno.  Caller must serialize
 * (hci_req_lock) — see hci_request().
 * NOTE(review): lines dropped in this dump (req() invocation, timeout and
 * signal handling, err declaration) — compare against upstream. */
110 static int __hci_request(struct hci_dev *hdev,
111 void (*req)(struct hci_dev *hdev, unsigned long opt),
112 unsigned long opt, __u32 timeout)
114 DECLARE_WAITQUEUE(wait, current);
117 BT_DBG("%s start", hdev->name);
119 hdev->req_status = HCI_REQ_PEND;
121 add_wait_queue(&hdev->req_wait_q, &wait);
122 set_current_state(TASK_INTERRUPTIBLE);
125 schedule_timeout(timeout);
127 remove_wait_queue(&hdev->req_wait_q, &wait);
129 if (signal_pending(current))
132 switch (hdev->req_status) {
/* Controller reported a status code: map it to a negative errno. */
134 err = -bt_to_errno(hdev->req_result);
137 case HCI_REQ_CANCELED:
/* req_result already carries a positive errno from hci_req_cancel(). */
138 err = -hdev->req_result;
146 hdev->req_status = hdev->req_result = 0;
148 BT_DBG("%s end: err %d", hdev->name, err);
/* Serialized wrapper around __hci_request(); fails unless the device is up. */
153 static int hci_request(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
159 if (!test_bit(HCI_UP, &hdev->flags))
162 /* Serialize all requests */
164 ret = __hci_request(hdev, req, opt, timeout);
165 hci_req_unlock(hdev);
/* Request helper: flag the reset in progress and issue HCI_Reset. */
170 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
172 BT_DBG("%s %ld", hdev->name, opt);
175 set_bit(HCI_RESET, &hdev->flags);
176 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
179 static void bredr_init(struct hci_dev *hdev)
181 struct hci_cp_delete_stored_link_key cp;
185 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
187 /* Mandatory initialization */
189 /* Read Local Supported Features */
190 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
192 /* Read Local Version */
193 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
195 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
196 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
198 /* Read BD Address */
199 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
201 /* Read Class of Device */
202 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
204 /* Read Local Name */
205 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
207 /* Read Voice Setting */
208 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
210 /* Optional initialization */
212 /* Clear Event Filters */
213 flt_type = HCI_FLT_CLEAR_ALL;
214 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
216 /* Connection accept timeout ~20 secs */
217 param = __constant_cpu_to_le16(0x7d00);
218 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
220 bacpy(&cp.bdaddr, BDADDR_ANY);
222 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
225 static void amp_init(struct hci_dev *hdev)
227 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
229 /* Read Local Version */
230 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
232 /* Read Local AMP Info */
233 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
235 /* Read Data Blk size */
236 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
/* Controller init request: flush any driver-queued special commands to the
 * command queue, reset the controller (unless the quirk forbids it), then
 * run the device-type specific init (bredr_init()/amp_init() — the switch
 * cases were dropped from this dump). */
239 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
243 BT_DBG("%s %ld", hdev->name, opt);
245 /* Driver initialization */
247 /* Special commands */
248 while ((skb = skb_dequeue(&hdev->driver_init))) {
249 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
250 skb->dev = (void *) hdev;
252 skb_queue_tail(&hdev->cmd_q, skb);
253 queue_work(hdev->workqueue, &hdev->cmd_work);
255 skb_queue_purge(&hdev->driver_init);
/* Reset on open unless the driver resets on close instead. */
258 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
259 hci_reset_req(hdev, 0);
261 switch (hdev->dev_type) {
271 BT_ERR("Unknown device type %d", hdev->dev_type);
/* LE init request: query the LE buffer size. */
276 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
278 BT_DBG("%s", hdev->name);
280 /* Read LE buffer size */
281 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
/* Request helper: write the scan enable setting (opt carries the mode). */
284 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
288 BT_DBG("%s %x", hdev->name, scan);
290 /* Inquiry and Page scans */
291 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request helper: write the authentication enable setting. */
294 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
298 BT_DBG("%s %x", hdev->name, auth);
301 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request helper: write the encryption mode setting. */
304 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
308 BT_DBG("%s %x", hdev->name, encrypt);
311 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request helper: write the default link policy (little-endian on the wire). */
314 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
316 __le16 policy = cpu_to_le16(opt);
318 BT_DBG("%s %x", hdev->name, policy);
320 /* Default link policy */
321 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
324 /* Get HCI device by index.
325 * Device is held on return. */
/* Walk the global device list under the read lock and return the device
 * with the matching index, with a reference held; NULL if not found. */
326 struct hci_dev *hci_dev_get(int index)
328 struct hci_dev *hdev = NULL, *d;
335 read_lock(&hci_dev_list_lock);
336 list_for_each_entry(d, &hci_dev_list, list) {
337 if (d->id == index) {
338 hdev = hci_dev_hold(d);
342 read_unlock(&hci_dev_list_lock);
346 /* ---- Inquiry support ---- */
/* Return true while discovery is in the FINDING or RESOLVING state. */
348 bool hci_discovery_active(struct hci_dev *hdev)
350 struct discovery_state *discov = &hdev->discovery;
352 switch (discov->state) {
353 case DISCOVERY_FINDING:
354 case DISCOVERY_RESOLVING:
/* Record a discovery state transition, emitting mgmt_discovering(0/1)
 * notifications on stop/start edges.  No-op if the state is unchanged. */
362 void hci_discovery_set_state(struct hci_dev *hdev, int state)
364 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
366 if (hdev->discovery.state == state)
370 case DISCOVERY_STOPPED:
/* STARTING -> STOPPED means discovery never began; don't report a stop. */
371 if (hdev->discovery.state != DISCOVERY_STARTING)
372 mgmt_discovering(hdev, 0);
374 case DISCOVERY_STARTING:
376 case DISCOVERY_FINDING:
377 mgmt_discovering(hdev, 1);
379 case DISCOVERY_RESOLVING:
381 case DISCOVERY_STOPPING:
385 hdev->discovery.state = state;
388 static void inquiry_cache_flush(struct hci_dev *hdev)
390 struct discovery_state *cache = &hdev->discovery;
391 struct inquiry_entry *p, *n;
393 list_for_each_entry_safe(p, n, &cache->all, all) {
398 INIT_LIST_HEAD(&cache->unknown);
399 INIT_LIST_HEAD(&cache->resolve);
/* Find a cached inquiry entry by Bluetooth address; NULL if not cached. */
402 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
405 struct discovery_state *cache = &hdev->discovery;
406 struct inquiry_entry *e;
408 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
410 list_for_each_entry(e, &cache->all, all) {
411 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Like hci_inquiry_cache_lookup(), but only searches entries whose
 * remote name is still unknown. */
418 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
421 struct discovery_state *cache = &hdev->discovery;
422 struct inquiry_entry *e;
424 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
426 list_for_each_entry(e, &cache->unknown, list) {
427 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Search the name-resolve list: BDADDR_ANY matches the first entry in the
 * given name_state, otherwise match on the exact address. */
434 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
438 struct discovery_state *cache = &hdev->discovery;
439 struct inquiry_entry *e;
441 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
443 list_for_each_entry(e, &cache->resolve, list) {
444 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
446 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert @ie into the resolve list so entries are ordered for name
 * resolution; insertion point advances past non-pending entries with
 * stronger (larger |RSSI|) signal.  NOTE(review): the loop body that
 * advances 'pos' was dropped from this dump. */
453 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
454 struct inquiry_entry *ie)
456 struct discovery_state *cache = &hdev->discovery;
457 struct list_head *pos = &cache->resolve;
458 struct inquiry_entry *p;
462 list_for_each_entry(p, &cache->resolve, list) {
463 if (p->name_state != NAME_PENDING &&
464 abs(p->data.rssi) >= abs(ie->data.rssi))
469 list_add(&ie->list, pos);
/* Insert a new inquiry result or refresh an existing entry.  Updates the
 * entry's name_state bookkeeping and RSSI-ordered position in the resolve
 * list, and reports the device's SSP mode through *ssp.
 * NOTE(review): several lines (early returns, the final return value) were
 * dropped from this dump — the visible check of NAME_NOT_KNOWN suggests the
 * return indicates whether a name request is still needed; confirm upstream. */
472 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
473 bool name_known, bool *ssp)
475 struct discovery_state *cache = &hdev->discovery;
476 struct inquiry_entry *ie;
478 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
481 *ssp = data->ssp_mode;
483 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
485 if (ie->data.ssp_mode && ssp)
/* RSSI changed while a name request is pending-needed: re-sort resolve list. */
488 if (ie->name_state == NAME_NEEDED &&
489 data->rssi != ie->data.rssi) {
490 ie->data.rssi = data->rssi;
491 hci_inquiry_cache_update_resolve(hdev, ie);
497 /* Entry not in the cache. Add new one. */
498 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
502 list_add(&ie->all, &cache->all);
505 ie->name_state = NAME_KNOWN;
507 ie->name_state = NAME_NOT_KNOWN;
508 list_add(&ie->list, &cache->unknown);
512 if (name_known && ie->name_state != NAME_KNOWN &&
513 ie->name_state != NAME_PENDING) {
514 ie->name_state = NAME_KNOWN;
518 memcpy(&ie->data, data, sizeof(*data));
519 ie->timestamp = jiffies;
520 cache->timestamp = jiffies;
522 if (ie->name_state == NAME_NOT_KNOWN)
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info; returns the number of entries copied. */
528 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
530 struct discovery_state *cache = &hdev->discovery;
531 struct inquiry_info *info = (struct inquiry_info *) buf;
532 struct inquiry_entry *e;
535 list_for_each_entry(e, &cache->all, all) {
536 struct inquiry_data *data = &e->data;
541 bacpy(&info->bdaddr, &data->bdaddr);
542 info->pscan_rep_mode = data->pscan_rep_mode;
543 info->pscan_period_mode = data->pscan_period_mode;
544 info->pscan_mode = data->pscan_mode;
545 memcpy(info->dev_class, data->dev_class, 3);
546 info->clock_offset = data->clock_offset;
552 BT_DBG("cache %p, copied %d", cache, copied);
/* Request helper: start an inquiry with the parameters from the
 * hci_inquiry_req passed via @opt, unless one is already active. */
556 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
558 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
559 struct hci_cp_inquiry cp;
561 BT_DBG("%s", hdev->name);
563 if (test_bit(HCI_INQUIRY, &hdev->flags))
567 memcpy(&cp.lap, &ir->lap, 3);
568 cp.length = ir->length;
569 cp.num_rsp = ir->num_rsp;
570 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* HCIINQUIRY ioctl: copy the request from userspace, flush a stale or
 * explicitly-invalidated cache and run a fresh inquiry, then dump the
 * cached results back to userspace (header first, then the entries).
 * NOTE(review): error paths, kfree and hci_dev_put were dropped from
 * this dump. */
573 int hci_inquiry(void __user *arg)
575 __u8 __user *ptr = arg;
576 struct hci_inquiry_req ir;
577 struct hci_dev *hdev;
578 int err = 0, do_inquiry = 0, max_rsp;
582 if (copy_from_user(&ir, ptr, sizeof(ir)))
585 hdev = hci_dev_get(ir.dev_id);
/* Re-inquire when the cache is too old, empty, or a flush was requested. */
590 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
591 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
592 inquiry_cache_flush(hdev);
595 hci_dev_unlock(hdev);
/* ir.length is in 1.28 s units? -- TODO confirm; 2000 ms per unit here. */
597 timeo = ir.length * msecs_to_jiffies(2000);
600 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
605 /* for unlimited number of responses we will use buffer with
608 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
610 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
611 * copy it to the user space.
613 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
620 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
621 hci_dev_unlock(hdev);
623 BT_DBG("num_rsp %d", ir.num_rsp);
625 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
627 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
640 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl / power-on path: open the driver, run the controller
 * init sequence (unless the device is raw), mark the device up and notify
 * listeners; on init failure, tear the transport back down.
 * NOTE(review): several goto labels, 'done:' cleanup and error branches
 * were dropped from this dump. */
642 int hci_dev_open(__u16 dev)
644 struct hci_dev *hdev;
647 hdev = hci_dev_get(dev);
651 BT_DBG("%s %p", hdev->name, hdev);
/* Refuse to bring up a device that is mid-unregister. */
655 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
/* Refuse while rfkill has the radio blocked. */
660 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
665 if (test_bit(HCI_UP, &hdev->flags)) {
670 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
671 set_bit(HCI_RAW, &hdev->flags);
673 /* Treat all non BR/EDR controllers as raw devices if
674 enable_hs is not set */
675 if (hdev->dev_type != HCI_BREDR && !enable_hs)
676 set_bit(HCI_RAW, &hdev->flags);
678 if (hdev->open(hdev)) {
/* Raw devices skip the kernel-driven init sequence entirely. */
683 if (!test_bit(HCI_RAW, &hdev->flags)) {
684 atomic_set(&hdev->cmd_cnt, 1);
685 set_bit(HCI_INIT, &hdev->flags);
686 hdev->init_last_cmd = 0;
688 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
690 if (lmp_host_le_capable(hdev))
691 ret = __hci_request(hdev, hci_le_init_req, 0,
694 clear_bit(HCI_INIT, &hdev->flags);
699 set_bit(HCI_UP, &hdev->flags);
700 hci_notify(hdev, HCI_DEV_UP);
701 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
702 mgmt_valid_hdev(hdev)) {
704 mgmt_powered(hdev, 1);
705 hci_dev_unlock(hdev);
708 /* Init failed, cleanup */
709 flush_work(&hdev->tx_work);
710 flush_work(&hdev->cmd_work);
711 flush_work(&hdev->rx_work);
713 skb_queue_purge(&hdev->cmd_q);
714 skb_queue_purge(&hdev->rx_q);
719 if (hdev->sent_cmd) {
720 kfree_skb(hdev->sent_cmd);
721 hdev->sent_cmd = NULL;
729 hci_req_unlock(hdev);
/* Common power-down path shared by close/rfkill/power_off: cancel pending
 * work and timers, flush connections and queues, optionally reset the
 * controller, notify mgmt, and release the transport. */
734 static int hci_dev_do_close(struct hci_dev *hdev)
736 BT_DBG("%s %p", hdev->name, hdev);
738 cancel_work_sync(&hdev->le_scan);
740 hci_req_cancel(hdev, ENODEV);
/* Already down: just stop the command timer and return. */
743 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
744 del_timer_sync(&hdev->cmd_timer);
745 hci_req_unlock(hdev);
749 /* Flush RX and TX works */
750 flush_work(&hdev->tx_work);
751 flush_work(&hdev->rx_work);
753 if (hdev->discov_timeout > 0) {
754 cancel_delayed_work(&hdev->discov_off);
755 hdev->discov_timeout = 0;
756 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
759 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
760 cancel_delayed_work(&hdev->service_cache);
762 cancel_delayed_work_sync(&hdev->le_scan_disable);
765 inquiry_cache_flush(hdev);
766 hci_conn_hash_flush(hdev);
767 hci_dev_unlock(hdev);
769 hci_notify(hdev, HCI_DEV_DOWN);
/* Reset the controller on close when the quirk requires it (raw excluded). */
775 skb_queue_purge(&hdev->cmd_q);
776 atomic_set(&hdev->cmd_cnt, 1);
777 if (!test_bit(HCI_RAW, &hdev->flags) &&
778 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
779 set_bit(HCI_INIT, &hdev->flags);
780 __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
781 clear_bit(HCI_INIT, &hdev->flags);
785 flush_work(&hdev->cmd_work);
788 skb_queue_purge(&hdev->rx_q);
789 skb_queue_purge(&hdev->cmd_q);
790 skb_queue_purge(&hdev->raw_q);
792 /* Drop last sent command */
793 if (hdev->sent_cmd) {
794 del_timer_sync(&hdev->cmd_timer);
795 kfree_skb(hdev->sent_cmd);
796 hdev->sent_cmd = NULL;
799 /* After this point our queues are empty
800 * and no tasks are scheduled. */
803 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
804 mgmt_valid_hdev(hdev)) {
806 mgmt_powered(hdev, 0);
807 hci_dev_unlock(hdev);
/* Controller-specific state is invalid once powered off. */
813 memset(hdev->eir, 0, sizeof(hdev->eir));
814 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
816 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl: cancel a pending auto-power-off and close the device. */
822 int hci_dev_close(__u16 dev)
824 struct hci_dev *hdev;
827 hdev = hci_dev_get(dev);
831 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
832 cancel_delayed_work(&hdev->power_off);
834 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: drop queued traffic, flush connections and the
 * inquiry cache, reset counters and (for non-raw devices) issue HCI_Reset. */
840 int hci_dev_reset(__u16 dev)
842 struct hci_dev *hdev;
845 hdev = hci_dev_get(dev);
851 if (!test_bit(HCI_UP, &hdev->flags))
855 skb_queue_purge(&hdev->rx_q);
856 skb_queue_purge(&hdev->cmd_q);
859 inquiry_cache_flush(hdev);
860 hci_conn_hash_flush(hdev);
861 hci_dev_unlock(hdev);
866 atomic_set(&hdev->cmd_cnt, 1);
867 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
869 if (!test_bit(HCI_RAW, &hdev->flags))
870 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
873 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device's byte/packet statistics. */
878 int hci_dev_reset_stat(__u16 dev)
880 struct hci_dev *hdev;
883 hdev = hci_dev_get(dev);
887 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the HCISET* ioctls: each case either runs an HCI request
 * (auth/encrypt/scan/link policy) or updates host-side settings directly
 * (link mode, packet types, ACL/SCO MTUs).  NOTE(review): the case labels
 * and failure branches were dropped from this dump. */
894 int hci_dev_cmd(unsigned int cmd, void __user *arg)
896 struct hci_dev *hdev;
897 struct hci_dev_req dr;
900 if (copy_from_user(&dr, arg, sizeof(dr)))
903 hdev = hci_dev_get(dr.dev_id);
909 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
914 if (!lmp_encrypt_capable(hdev)) {
919 if (!test_bit(HCI_AUTH, &hdev->flags)) {
920 /* Auth must be enabled first */
921 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
927 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
932 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
937 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
942 hdev->link_mode = ((__u16) dr.dev_opt) &
943 (HCI_LM_MASTER | HCI_LM_ACCEPT);
947 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high 16 bits and packet count in the low 16. */
951 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
952 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
956 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
957 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: return (id, flags) for up to dev_num registered
 * devices.  Also cancels a pending auto-off and marks non-mgmt devices
 * pairable as a side effect of enumeration. */
969 int hci_get_dev_list(void __user *arg)
971 struct hci_dev *hdev;
972 struct hci_dev_list_req *dl;
973 struct hci_dev_req *dr;
974 int n = 0, size, err;
977 if (get_user(dev_num, (__u16 __user *) arg))
/* Bound the allocation so a hostile dev_num can't request a huge buffer. */
980 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
983 size = sizeof(*dl) + dev_num * sizeof(*dr);
985 dl = kzalloc(size, GFP_KERNEL);
991 read_lock(&hci_dev_list_lock);
992 list_for_each_entry(hdev, &hci_dev_list, list) {
993 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
994 cancel_delayed_work(&hdev->power_off);
996 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
997 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
999 (dr + n)->dev_id = hdev->id;
1000 (dr + n)->dev_opt = hdev->flags;
1005 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of devices actually found. */
1008 size = sizeof(*dl) + n * sizeof(*dr);
1010 err = copy_to_user(arg, dl, size);
1013 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot (address, type,
 * flags, MTUs, link policy/mode, stats, features) for one device. */
1016 int hci_get_dev_info(void __user *arg)
1018 struct hci_dev *hdev;
1019 struct hci_dev_info di;
1022 if (copy_from_user(&di, arg, sizeof(di)))
1025 hdev = hci_dev_get(di.dev_id);
1029 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1030 cancel_delayed_work_sync(&hdev->power_off)
1032 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1033 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1035 strcpy(di.name, hdev->name);
1036 di.bdaddr = hdev->bdaddr;
/* Pack bus type in the low nibble and device type in the high nibble. */
1037 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1038 di.flags = hdev->flags;
1039 di.pkt_type = hdev->pkt_type;
1040 di.acl_mtu = hdev->acl_mtu;
1041 di.acl_pkts = hdev->acl_pkts;
1042 di.sco_mtu = hdev->sco_mtu;
1043 di.sco_pkts = hdev->sco_pkts;
1044 di.link_policy = hdev->link_policy;
1045 di.link_mode = hdev->link_mode;
1047 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1048 memcpy(&di.features, &hdev->features, sizeof(di.features));
1050 if (copy_to_user(arg, &di, sizeof(di)))
1058 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: when the radio is blocked, force the device down. */
1060 static int hci_rfkill_set_block(void *data, bool blocked)
1062 struct hci_dev *hdev = data;
1064 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1069 hci_dev_do_close(hdev);
/* rfkill operations table: only the block/unblock hook is implemented. */
1074 static const struct rfkill_ops hci_rfkill_ops = {
1075 .set_block = hci_rfkill_set_block,
/* Deferred power-on work: open the device, arm the auto-off timer if
 * requested, and announce the index once initial setup is done. */
1078 static void hci_power_on(struct work_struct *work)
1080 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1082 BT_DBG("%s", hdev->name);
1084 if (hci_dev_open(hdev->id) < 0)
1087 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1088 schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
1090 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1091 mgmt_index_added(hdev);
/* Deferred power-off work: just run the common close path. */
1094 static void hci_power_off(struct work_struct *work)
1096 struct hci_dev *hdev = container_of(work, struct hci_dev,
1099 BT_DBG("%s", hdev->name);
1101 hci_dev_do_close(hdev);
/* Delayed work: discoverable timeout expired — drop back to page scan
 * only and clear the stored timeout. */
1104 static void hci_discov_off(struct work_struct *work)
1106 struct hci_dev *hdev;
1107 u8 scan = SCAN_PAGE;
1109 hdev = container_of(work, struct hci_dev, discov_off.work);
1111 BT_DBG("%s", hdev->name);
1115 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1117 hdev->discov_timeout = 0;
1119 hci_dev_unlock(hdev);
/* Free every registered UUID entry on the device. */
1122 int hci_uuids_clear(struct hci_dev *hdev)
1124 struct list_head *p, *n;
1126 list_for_each_safe(p, n, &hdev->uuids) {
1127 struct bt_uuid *uuid;
1129 uuid = list_entry(p, struct bt_uuid, list);
/* Free every stored BR/EDR link key on the device. */
1138 int hci_link_keys_clear(struct hci_dev *hdev)
1140 struct list_head *p, *n;
1142 list_for_each_safe(p, n, &hdev->link_keys) {
1143 struct link_key *key;
1145 key = list_entry(p, struct link_key, list);
/* Free every stored SMP long term key on the device. */
1154 int hci_smp_ltks_clear(struct hci_dev *hdev)
1156 struct smp_ltk *k, *tmp;
1158 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Look up a stored link key by remote address; NULL if none. */
1166 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1170 list_for_each_entry(k, &hdev->link_keys, list)
1171 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and both sides' authentication requirements
 * (0x02/0x03 = dedicated bonding; > 0x01 = some bonding requested). */
1177 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1178 u8 key_type, u8 old_key_type)
/* Legacy key types (< 0x03) are always stored. */
1181 if (key_type < 0x03)
1184 /* Debug keys are insecure so don't store them persistently */
1185 if (key_type == HCI_LK_DEBUG_COMBINATION)
1188 /* Changed combination key and there's no previous one */
1189 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1192 /* Security mode 3 case */
1196 /* Neither local nor remote side had no-bonding as requirement */
1197 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1200 /* Local side had dedicated bonding as requirement */
1201 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1204 /* Remote side had dedicated bonding as requirement */
1205 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1208 /* If none of the above criteria match, then don't store the key
/* Look up a long term key by its EDIV and 8-byte random value. */
1213 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1217 list_for_each_entry(k, &hdev->long_term_keys, list) {
1218 if (k->ediv != ediv ||
1219 memcmp(rand, k->rand, sizeof(k->rand)))
/* Look up a long term key by remote address and address type. */
1228 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1233 list_for_each_entry(k, &hdev->long_term_keys, list)
1234 if (addr_type == k->bdaddr_type &&
1235 bacmp(bdaddr, &k->bdaddr) == 0)
/* Store (or update) a link key for @bdaddr.  Works around controllers that
 * report a changed-combination key during legacy pairing, decides whether
 * the key is persistent, and notifies mgmt for newly created keys. */
1241 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1242 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1244 struct link_key *key, *old_key;
1248 old_key = hci_find_link_key(hdev, bdaddr);
1250 old_key_type = old_key->type;
/* No previous key: 0xff marks "no old key type". */
1253 old_key_type = conn ? conn->key_type : 0xff;
1254 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1257 list_add(&key->list, &hdev->link_keys);
1260 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1262 /* Some buggy controller combinations generate a changed
1263 * combination key for legacy pairing even when there's no
1265 if (type == HCI_LK_CHANGED_COMBINATION &&
1266 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1267 type = HCI_LK_COMBINATION;
1269 conn->key_type = type;
1272 bacpy(&key->bdaddr, bdaddr);
1273 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1274 key->pin_len = pin_len;
/* A changed-combination key keeps the original key's type. */
1276 if (type == HCI_LK_CHANGED_COMBINATION)
1277 key->type = old_key_type;
1284 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1286 mgmt_new_link_key(hdev, key, persistent);
1289 conn->flush_key = !persistent;
/* Store (or update) an SMP key (STK or LTK) for @bdaddr/@addr_type and
 * notify mgmt for new LTKs.  Rejects types that are neither STK nor LTK. */
1294 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1295 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1298 struct smp_ltk *key, *old_key;
1300 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1303 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1307 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1310 list_add(&key->list, &hdev->long_term_keys);
1313 bacpy(&key->bdaddr, bdaddr);
1314 key->bdaddr_type = addr_type;
1315 memcpy(key->val, tk, sizeof(key->val));
1316 key->authenticated = authenticated;
1318 key->enc_size = enc_size;
1320 memcpy(key->rand, rand, sizeof(key->rand));
1325 if (type & HCI_SMP_LTK)
1326 mgmt_new_ltk(hdev, key, 1);
/* Remove the stored link key for @bdaddr, if any. */
1331 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1333 struct link_key *key;
1335 key = hci_find_link_key(hdev, bdaddr);
1339 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1341 list_del(&key->list);
/* Remove all stored long term keys matching @bdaddr. */
1347 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1349 struct smp_ltk *k, *tmp;
1351 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1352 if (bacmp(bdaddr, &k->bdaddr))
1355 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1364 /* HCI command timer function */
/* Fires when a sent command got no completion in time: log the stuck
 * opcode and re-arm the command work so the queue can make progress. */
1365 static void hci_cmd_timeout(unsigned long arg)
1367 struct hci_dev *hdev = (void *) arg;
1369 if (hdev->sent_cmd) {
1370 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1371 u16 opcode = __le16_to_cpu(sent->opcode);
1373 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1375 BT_ERR("%s command tx timeout", hdev->name);
/* Allow one more command through and kick the worker. */
1378 atomic_set(&hdev->cmd_cnt, 1);
1379 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote OOB pairing data by address; NULL if none. */
1382 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1385 struct oob_data *data;
1387 list_for_each_entry(data, &hdev->remote_oob_data, list)
1388 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Remove stored remote OOB data for @bdaddr, if any. */
1394 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1396 struct oob_data *data;
1398 data = hci_find_remote_oob_data(hdev, bdaddr);
1402 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1404 list_del(&data->list);
/* Free every stored remote OOB data entry. */
1410 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1412 struct oob_data *data, *n;
1414 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1415 list_del(&data->list);
/* Store (or refresh) remote OOB pairing data (hash + randomizer) for
 * @bdaddr, allocating a new entry when none exists. */
1422 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1425 struct oob_data *data;
1427 data = hci_find_remote_oob_data(hdev, bdaddr);
1430 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1434 bacpy(&data->bdaddr, bdaddr);
1435 list_add(&data->list, &hdev->remote_oob_data);
1438 memcpy(data->hash, hash, sizeof(data->hash));
1439 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1441 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
/* Look up a blacklist entry by address; NULL if not blacklisted. */
1446 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1448 struct bdaddr_list *b;
1450 list_for_each_entry(b, &hdev->blacklist, list)
1451 if (bacmp(bdaddr, &b->bdaddr) == 0)
/* Free every blacklist entry on the device. */
1457 int hci_blacklist_clear(struct hci_dev *hdev)
1459 struct list_head *p, *n;
1461 list_for_each_safe(p, n, &hdev->blacklist) {
1462 struct bdaddr_list *b;
1464 b = list_entry(p, struct bdaddr_list, list);
/* Add @bdaddr to the blacklist (rejecting BDADDR_ANY and duplicates)
 * and notify mgmt that the device is blocked. */
1473 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1475 struct bdaddr_list *entry;
1477 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1480 if (hci_blacklist_lookup(hdev, bdaddr))
1483 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1487 bacpy(&entry->bdaddr, bdaddr);
1489 list_add(&entry->list, &hdev->blacklist);
1491 return mgmt_device_blocked(hdev, bdaddr, type);
/* Remove @bdaddr from the blacklist (BDADDR_ANY clears the whole list)
 * and notify mgmt that the device is unblocked. */
1494 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1496 struct bdaddr_list *entry;
1498 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1499 return hci_blacklist_clear(hdev);
1501 entry = hci_blacklist_lookup(hdev, bdaddr);
1505 list_del(&entry->list);
1508 return mgmt_device_unblocked(hdev, bdaddr, type);
/* Request helper: set LE scan parameters from the le_scan_params in @opt. */
1511 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1513 struct le_scan_params *param = (struct le_scan_params *) opt;
1514 struct hci_cp_le_set_scan_param cp;
1516 memset(&cp, 0, sizeof(cp));
1517 cp.type = param->type;
1518 cp.interval = cpu_to_le16(param->interval);
1519 cp.window = cpu_to_le16(param->window);
1521 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
/* Request helper: enable LE scanning (the enable field assignment was
 * dropped from this dump). */
1524 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1526 struct hci_cp_le_set_scan_enable cp;
1528 memset(&cp, 0, sizeof(cp));
1532 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Synchronously start an LE scan: set parameters, enable scanning, then
 * arm the delayed work that will disable it after @timeout ms.  Fails
 * with -EINPROGRESS if a scan is already running. */
1535 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1536 u16 window, int timeout)
1538 long timeo = msecs_to_jiffies(3000);
1539 struct le_scan_params param;
1542 BT_DBG("%s", hdev->name);
1544 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1545 return -EINPROGRESS;
1548 param.interval = interval;
1549 param.window = window;
1553 err = __hci_request(hdev, le_scan_param_req, (unsigned long) ¶m,
1556 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1558 hci_req_unlock(hdev);
/* Auto-disable the scan after the requested duration. */
1563 schedule_delayed_work(&hdev->le_scan_disable,
1564 msecs_to_jiffies(timeout));
/* Cancel an active LE scan: if the auto-disable work was still pending,
 * send the disable command ourselves. */
1569 int hci_cancel_le_scan(struct hci_dev *hdev)
1571 BT_DBG("%s", hdev->name);
1573 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1576 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1577 struct hci_cp_le_set_scan_enable cp;
1579 /* Send HCI command to disable LE Scan */
1580 memset(&cp, 0, sizeof(cp));
1581 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1587 static void le_scan_disable_work(struct work_struct *work)
1589 struct hci_dev *hdev = container_of(work, struct hci_dev,
1590 le_scan_disable.work);
1591 struct hci_cp_le_set_scan_enable cp;
1593 BT_DBG("%s", hdev->name);
1595 memset(&cp, 0, sizeof(cp));
1597 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Work item: run the LE scan with the parameters stashed on the device
 * by hci_le_scan(). */
1600 static void le_scan_work(struct work_struct *work)
1602 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1603 struct le_scan_params *param = &hdev->le_scan_params;
1605 BT_DBG("%s", hdev->name);
1607 hci_do_le_scan(hdev, param->type, param->interval, param->window,
/* Asynchronously start an LE scan: stash the parameters on the device and
 * queue le_scan_work on the long-running workqueue.  -EINPROGRESS if the
 * scan work is already queued or running. */
1611 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1614 struct le_scan_params *param = &hdev->le_scan_params;
1616 BT_DBG("%s", hdev->name);
1618 if (work_busy(&hdev->le_scan))
1619 return -EINPROGRESS;
1622 param->interval = interval;
1623 param->window = window;
1624 param->timeout = timeout;
/* system_long_wq: the scan can block for its whole duration. */
1626 queue_work(system_long_wq, &hdev->le_scan);
1631 /* Alloc HCI device */
/* Allocate and initialize a new hci_dev: default packet types and modes,
 * locks, lists, work items, packet queues, wait queue and command timer.
 * Returns NULL on allocation failure (the check line was dropped from
 * this dump). */
1632 struct hci_dev *hci_alloc_dev(void)
1634 struct hci_dev *hdev;
1636 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1640 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1641 hdev->esco_type = (ESCO_HV1);
1642 hdev->link_mode = (HCI_LM_ACCEPT);
1643 hdev->io_capability = 0x03; /* No Input No Output */
/* Sniff interval bounds -- presumably in 0.625 ms slots; TODO confirm. */
1645 hdev->sniff_max_interval = 800;
1646 hdev->sniff_min_interval = 80;
1648 mutex_init(&hdev->lock);
1649 mutex_init(&hdev->req_lock);
1651 INIT_LIST_HEAD(&hdev->mgmt_pending);
1652 INIT_LIST_HEAD(&hdev->blacklist);
1653 INIT_LIST_HEAD(&hdev->uuids);
1654 INIT_LIST_HEAD(&hdev->link_keys);
1655 INIT_LIST_HEAD(&hdev->long_term_keys);
1656 INIT_LIST_HEAD(&hdev->remote_oob_data);
1657 INIT_LIST_HEAD(&hdev->conn_hash.list);
1659 INIT_WORK(&hdev->rx_work, hci_rx_work);
1660 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1661 INIT_WORK(&hdev->tx_work, hci_tx_work);
1662 INIT_WORK(&hdev->power_on, hci_power_on);
1663 INIT_WORK(&hdev->le_scan, le_scan_work);
1665 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1666 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1667 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1669 skb_queue_head_init(&hdev->driver_init);
1670 skb_queue_head_init(&hdev->rx_q);
1671 skb_queue_head_init(&hdev->cmd_q);
1672 skb_queue_head_init(&hdev->raw_q);
1674 init_waitqueue_head(&hdev->req_wait_q);
1676 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
1678 hci_init_sysfs(hdev);
1679 discovery_init(hdev);
1685 /* Free HCI device */
/* Free HCI device: purge the driver-init queue and drop the embedded
 * device reference; the struct itself is freed by the device-model
 * release callback once the last reference is gone. */
void hci_free_dev(struct hci_dev *hdev)
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
EXPORT_SYMBOL(hci_free_dev);
1695 /* Register HCI device */
/* Register an HCI device: validate the driver callbacks, allocate an
 * index with the IDA (AMP controllers never get index 0 so the index
 * can double as AMP controller ID), name it "hciN", link it into
 * hci_dev_list, create the per-device workqueue, sysfs entries and the
 * optional rfkill switch, then schedule power-on.
 * NOTE(review): error returns, switch case labels, goto labels and some
 * statement continuations (workqueue flags, rfkill_alloc arguments) are
 * elided in this extract; the tail lines are the error-unwind path. */
int hci_register_dev(struct hci_dev *hdev)
	if (!hdev->open || !hdev->close)

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
	id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);

	sprintf(hdev->name, "hci%d", id);

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Dedicated high-priority workqueue named after the device */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
	if (!hdev->workqueue) {

	error = hci_add_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
	if (rfkill_register(hdev->rfkill) < 0) {
		/* rfkill is optional — continue without it */
		rfkill_destroy(hdev->rfkill);
		hdev->rfkill = NULL;

	set_bit(HCI_SETUP, &hdev->dev_flags);

	/* AMP controllers do not participate in auto power-off */
	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	/* --- error unwinding --- */
	destroy_workqueue(hdev->workqueue);

	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);
EXPORT_SYMBOL(hci_register_dev);
1774 /* Unregister HCI device */
/* Unregister an HCI device: flag HCI_UNREGISTER, unlink it from
 * hci_dev_list, close it, free the reassembly buffers, notify mgmt that
 * the index is gone (unless still initialising / in SETUP), tear down
 * rfkill, sysfs and the workqueue, clear all persistent lists and
 * release the index.
 * NOTE(review): local declarations (i, id), an id snapshot, some
 * hci_dev_lock() calls and closing braces are elided in this extract. */
void hci_unregister_dev(struct hci_dev *hdev)
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);

	/* mgmt_index_removed should take care of emptying the
	 * pending mgmt command list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	rfkill_unregister(hdev->rfkill);
	rfkill_destroy(hdev->rfkill);

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	ida_simple_remove(&hci_index_ida, id);
EXPORT_SYMBOL(hci_unregister_dev);
1830 /* Suspend HCI device */
/* Suspend HCI device: only broadcasts the suspend event to HCI sockets;
 * return statement elided in this extract. */
int hci_suspend_dev(struct hci_dev *hdev)
	hci_notify(hdev, HCI_DEV_SUSPEND);
EXPORT_SYMBOL(hci_suspend_dev);
1838 /* Resume HCI device */
/* Resume HCI device: only broadcasts the resume event to HCI sockets;
 * return statement elided in this extract. */
int hci_resume_dev(struct hci_dev *hdev)
	hci_notify(hdev, HCI_DEV_RESUME);
EXPORT_SYMBOL(hci_resume_dev);
1846 /* Receive frame from HCI drivers */
/* Receive a complete frame from an HCI driver.  Frames are accepted
 * only while the device is UP or in INIT; accepted frames are marked
 * incoming, timestamped and queued for hci_rx_work on the device
 * workqueue.  skb->dev carries the hci_dev pointer by convention.
 * NOTE(review): the drop path (kfree_skb + error return) and the
 * success return are elided in this extract. */
int hci_recv_frame(struct sk_buff *skb)
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {

	/* Mark direction for sockets/monitor */
	bt_cb(skb)->incoming = 1;

	/* Time stamp on reception */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);
EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble a possibly fragmented HCI packet of @type into
 * hdev->reassembly[index].  On the first fragment a maximally sized skb
 * for the packet type is allocated; subsequent fragments are appended
 * until the length announced in the packet header (tracked in
 * scb->expect) is satisfied, at which point the complete frame is
 * handed to hci_recv_frame().
 * NOTE(review): the outer while loop, switch statements, several case
 * labels, returns and closing braces are elided in this extract; the
 * visible per-type branches compute header and maximum frame sizes. */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Reject invalid packet types and out-of-range reassembly slots */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)

	skb = hdev->reassembly[index];

	/* Per-type maximum frame size and header length */
	case HCI_ACLDATA_PKT:
		len = HCI_MAX_FRAME_SIZE;
		hlen = HCI_ACL_HDR_SIZE;
		len = HCI_MAX_EVENT_SIZE;
		hlen = HCI_EVENT_HDR_SIZE;
	case HCI_SCODATA_PKT:
		len = HCI_MAX_SCO_SIZE;
		hlen = HCI_SCO_HDR_SIZE;

	/* First fragment: allocate a fresh reassembly buffer */
	skb = bt_skb_alloc(len, GFP_ATOMIC);

	scb = (void *) skb->cb;
	scb->pkt_type = type;

	skb->dev = (void *) hdev;
	hdev->reassembly[index] = skb;

	/* Append as much of the input as is still expected */
	scb = (void *) skb->cb;
	len = min_t(uint, scb->expect, count);

	memcpy(skb_put(skb, len), data, len);

	/* Once the event header is complete, learn the payload length */
	if (skb->len == HCI_EVENT_HDR_SIZE) {
		struct hci_event_hdr *h = hci_event_hdr(skb);
		scb->expect = h->plen;

		/* Announced payload exceeds the buffer: drop the frame */
		if (skb_tailroom(skb) < scb->expect) {
			hdev->reassembly[index] = NULL;

	case HCI_ACLDATA_PKT:
		if (skb->len == HCI_ACL_HDR_SIZE) {
			struct hci_acl_hdr *h = hci_acl_hdr(skb);
			scb->expect = __le16_to_cpu(h->dlen);

			if (skb_tailroom(skb) < scb->expect) {
				hdev->reassembly[index] = NULL;

	case HCI_SCODATA_PKT:
		if (skb->len == HCI_SCO_HDR_SIZE) {
			struct hci_sco_hdr *h = hci_sco_hdr(skb);
			scb->expect = h->dlen;

			if (skb_tailroom(skb) < scb->expect) {
				hdev->reassembly[index] = NULL;

	if (scb->expect == 0) {
		/* Complete frame */

		bt_cb(skb)->pkt_type = type;
		hci_recv_frame(skb);

		hdev->reassembly[index] = NULL;
/* Feed driver bytes of a known packet type into the per-type
 * reassembly slot (index = type - 1).
 * NOTE(review): the surrounding while loop, error handling and returns
 * are elided in this extract. */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)

	rem = hci_reassembly(hdev, type, data, count, type - 1);

	/* Advance past the bytes hci_reassembly() consumed */
	data += (count - rem);
EXPORT_SYMBOL(hci_recv_fragment);
1998 #define STREAM_REASSEMBLY 0
/* Reassemble from a raw byte stream where each frame is preceded by a
 * one-byte packet-type indicator; uses the dedicated STREAM_REASSEMBLY
 * slot.
 * NOTE(review): large parts of this function (loop, type extraction,
 * error handling, returns) are elided in this extract. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
	struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

	struct { char type; } *pkt;

	/* Start of the frame */

	/* Continue the frame already in progress */
	type = bt_cb(skb)->pkt_type;

	rem = hci_reassembly(hdev, type, data, count,
	data += (count - rem);
EXPORT_SYMBOL(hci_recv_stream_fragment);
2033 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback structure on the global cb list;
 * the success return is elided in this extract. */
int hci_register_cb(struct hci_cb *cb)
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);
EXPORT_SYMBOL(hci_register_cb);
/* Remove an upper-protocol callback structure from the global cb list;
 * the success return is elided in this extract. */
int hci_unregister_cb(struct hci_cb *cb)
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);
EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing frame to the driver: timestamp it, copy it to the
 * monitor socket and (in promiscuous mode) to raw HCI sockets, then
 * invoke the driver's ->send() callback.
 * NOTE(review): the NULL-hdev guard and the skb_orphan() call implied
 * by the trailing comment are elided in this extract. */
static int hci_send_frame(struct sk_buff *skb)
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp on transmission */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);

	/* Get rid of skb owner, prior to sending to the driver. */

	return hdev->send(skb);
2087 /* Send HCI command */
/* Build an HCI command skb (command header + parameter block) and queue
 * it on cmd_q for hci_cmd_work.  Allocated with GFP_ATOMIC since
 * callers may hold spinlocks.
 * NOTE(review): the -ENOMEM return after the BT_ERR, the hdr->plen
 * assignment and the final return are elided in this extract. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
		BT_ERR("%s no memory for command", hdev->name);

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);

	memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during controller init so the
	 * init sequence can continue from it on completion */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
2123 /* Get data from the previously sent command */
/* Return a pointer to the parameter area of the most recently sent
 * command (hdev->sent_cmd), but only when its opcode matches @opcode.
 * NOTE(review): the NULL returns on the two guard conditions are elided
 * in this extract. */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	/* Parameters start right after the command header */
	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header to @skb: the 16-bit handle field packs the
 * connection handle with the packet-boundary/broadcast flags, followed
 * by the little-endian payload length.
 * NOTE(review): the local "len" (snapshot of skb->len before the push)
 * is declared on an elided line in this extract. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
	struct hci_acl_hdr *hdr;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
/* Queue an ACL packet (possibly with a frag_list of continuation
 * fragments) on @queue.  The head fragment keeps the caller's flags;
 * continuation fragments have ACL_START cleared, and all fragments are
 * queued atomically under the queue lock so the controller never sees
 * an interleaved stream.
 * NOTE(review): the if/else around the non-fragmented case, the do/while
 * over the frag list and skb->data_len handling are elided here. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	/* Non fragmented */
	BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

	skb_queue_tail(queue, skb);

	/* Fragmented */
	BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

	skb_shinfo(skb)->frag_list = NULL;

	/* Queue all fragments atomically */
	spin_lock(&queue->lock);

	__skb_queue_tail(queue, skb);

	/* Continuation fragments never carry ACL_START */
	flags &= ~ACL_START;
	skb = list; list = list->next;

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

	__skb_queue_tail(queue, skb);

	spin_unlock(&queue->lock);
/* Send ACL data on a channel: tag the skb with the owning device, queue
 * it (with fragmentation handling) on the channel's data queue, and
 * kick the TX work. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
/* Send SCO data: prepend the SCO header (handle + length), tag the skb,
 * queue it on the connection's data queue and kick the TX work. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
2237 /* ---- HCI TX task (outgoing data) ---- */
2239 /* HCI Connection scheduler */
/* Connection scheduler: among connections of @type that have queued
 * data and are in CONNECTED/CONFIG state, pick the one with the fewest
 * in-flight packets (c->sent), then derive its TX quote from the
 * controller's free buffer count for that link type.
 * NOTE(review): the quote output parameter in the prototype, the RCU
 * read lock/unlock, continue statements, case labels and the quota
 * arithmetic are elided in this extract. */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)

		/* Track the least-busy eligible connection */
		if (c->sent < min) {

	if (hci_conn_num(hdev, type) == num)

	/* Free controller buffers for the chosen link type */
	switch (conn->type) {
		cnt = hdev->acl_cnt;
		cnt = hdev->sco_cnt;
		/* LE shares ACL buffers when no LE pool is advertised */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		BT_ERR("Unknown link type");

	BT_DBG("conn %p quote %d", conn, *quote);
/* Link TX watchdog: the controller stopped acknowledging packets on
 * links of @type — forcibly disconnect every connection of that type
 * that still has unacked (c->sent) packets. */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
	struct hci_conn_hash *h = &hdev->conn_hash;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel scheduler: walk every CONNECTED/CONFIG connection of @type
 * and its channels, preferring (1) the highest head-skb priority seen
 * so far and (2) among equal priorities, the connection with the fewest
 * in-flight packets; then derive the TX quote from the free buffer
 * count for the link type.
 * NOTE(review): the quote output parameter, RCU locking, continue/skip
 * statements, chan selection assignments and case labels are elided. */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))

			skb = skb_peek(&tmp->data_q);
			/* Lower-priority channels lose to the best so far */
			if (skb->priority < cur_prio)

			/* Higher priority resets the least-busy search */
			if (skb->priority > cur_prio) {
				cur_prio = skb->priority;

			if (conn->sent < min) {

		if (hci_conn_num(hdev, type) == conn_num)

	/* Free controller buffers for the chosen link type */
	switch (chan->conn->type) {
		cnt = hdev->acl_cnt;
		cnt = hdev->sco_cnt;
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		BT_ERR("Unknown link type");

	BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass: after a TX round that consumed buffers, promote
 * the head skb of every non-empty channel of @type towards
 * HCI_PRIO_MAX - 1 so persistent high-priority traffic cannot starve
 * lower-priority channels forever.
 * NOTE(review): RCU locking, skip/continue statements and the counter
 * increments are elided in this extract. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&chan->data_q))

			skb = skb_peek(&chan->data_q);
			/* Already at the promotion ceiling */
			if (skb->priority >= HCI_PRIO_MAX - 1)

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,

		if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks this packet consumes under
 * block-based flow control; the ACL header is not counted against the
 * block budget. */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If the controller has been out of buffers (@cnt == 0) past the ACL TX
 * timeout, treat the ACL link as stalled and kill stuck connections.
 * Skipped entirely in raw mode, where the stack does no flow control. */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
2467 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2469 unsigned int cnt = hdev->acl_cnt;
2470 struct hci_chan *chan;
2471 struct sk_buff *skb;
2474 __check_timeout(hdev, cnt);
2476 while (hdev->acl_cnt &&
2477 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
2478 u32 priority = (skb_peek(&chan->data_q))->priority;
2479 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2480 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2481 skb->len, skb->priority);
2483 /* Stop if priority has changed */
2484 if (skb->priority < priority)
2487 skb = skb_dequeue(&chan->data_q);
2489 hci_conn_enter_active_mode(chan->conn,
2490 bt_cb(skb)->force_active);
2492 hci_send_frame(skb);
2493 hdev->acl_last_tx = jiffies;
2501 if (cnt != hdev->acl_cnt)
2502 hci_prio_recalculate(hdev, ACL_LINK);
2505 static void hci_sched_acl_blk(struct hci_dev *hdev)
2507 unsigned int cnt = hdev->block_cnt;
2508 struct hci_chan *chan;
2509 struct sk_buff *skb;
2512 __check_timeout(hdev, cnt);
2514 while (hdev->block_cnt > 0 &&
2515 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
2516 u32 priority = (skb_peek(&chan->data_q))->priority;
2517 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2520 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2521 skb->len, skb->priority);
2523 /* Stop if priority has changed */
2524 if (skb->priority < priority)
2527 skb = skb_dequeue(&chan->data_q);
2529 blocks = __get_blocks(hdev, skb);
2530 if (blocks > hdev->block_cnt)
2533 hci_conn_enter_active_mode(chan->conn,
2534 bt_cb(skb)->force_active);
2536 hci_send_frame(skb);
2537 hdev->acl_last_tx = jiffies;
2539 hdev->block_cnt -= blocks;
2542 chan->sent += blocks;
2543 chan->conn->sent += blocks;
2547 if (cnt != hdev->block_cnt)
2548 hci_prio_recalculate(hdev, ACL_LINK);
/* Dispatch ACL scheduling according to the controller's flow-control
 * mode (packet-based vs block-based); does nothing when no ACL
 * connections exist.  break statements and closing braces are elided
 * in this extract. */
static void hci_sched_acl(struct hci_dev *hdev)
	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
2570 static void hci_sched_sco(struct hci_dev *hdev)
2572 struct hci_conn *conn;
2573 struct sk_buff *skb;
2576 BT_DBG("%s", hdev->name);
2578 if (!hci_conn_num(hdev, SCO_LINK))
2581 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
2582 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2583 BT_DBG("skb %p len %d", skb, skb->len);
2584 hci_send_frame(skb);
2587 if (conn->sent == ~0)
/* eSCO scheduler: identical shape to hci_sched_sco() but walks
 * ESCO_LINK connections; eSCO shares the SCO buffer count (sco_cnt).
 * NOTE(review): the quote declaration, the "&quote" argument closing
 * the hci_low_sent() call, sent accounting and the early return are
 * elided in this extract. */
static void hci_sched_esco(struct hci_dev *hdev)
	struct hci_conn *conn;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			if (conn->sent == ~0)
2617 static void hci_sched_le(struct hci_dev *hdev)
2619 struct hci_chan *chan;
2620 struct sk_buff *skb;
2621 int quote, cnt, tmp;
2623 BT_DBG("%s", hdev->name);
2625 if (!hci_conn_num(hdev, LE_LINK))
2628 if (!test_bit(HCI_RAW, &hdev->flags)) {
2629 /* LE tx timeout must be longer than maximum
2630 * link supervision timeout (40.9 seconds) */
2631 if (!hdev->le_cnt && hdev->le_pkts &&
2632 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2633 hci_link_tx_to(hdev, LE_LINK);
2636 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2638 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
2639 u32 priority = (skb_peek(&chan->data_q))->priority;
2640 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2641 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2642 skb->len, skb->priority);
2644 /* Stop if priority has changed */
2645 if (skb->priority < priority)
2648 skb = skb_dequeue(&chan->data_q);
2650 hci_send_frame(skb);
2651 hdev->le_last_tx = jiffies;
2662 hdev->acl_cnt = cnt;
2665 hci_prio_recalculate(hdev, LE_LINK);
/* TX work: run the per-link-type schedulers, then flush any raw
 * (unknown-type) packets straight to the driver.
 * NOTE(review): an hci_sched_le() call and the HCI_RAW guard around the
 * schedulers appear elided between the esco call and the raw-queue
 * flush in this extract. */
static void hci_tx_work(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
2691 /* ----- HCI RX task (incoming data processing) ----- */
2693 /* ACL data packet */
/* Handle an incoming ACL data packet: unpack flags and handle from the
 * shared 16-bit header field, look up the connection, nudge it into
 * active mode, notify mgmt of the first data on a new connection, then
 * hand the payload to L2CAP.  The trailing BT_ERR is the unknown-handle
 * path.
 * NOTE(review): the if (conn) branch structure, hci_dev_lock() calls,
 * the cp.name argument tail of mgmt_device_connected(), kfree_skb on
 * the error path and returns are elided in this extract. */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit handle field packs flags and the connection handle */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,

	hdev->stat.acl_rx++;

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

	/* First data on a mgmt-managed connection: announce it once */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
	hci_dev_unlock(hdev);

	/* Send to upper protocol */
	l2cap_recv_acldata(conn, skb, flags);

	BT_ERR("%s ACL packet for unknown connection handle %d",
	       hdev->name, handle);
2737 /* SCO data packet */
/* Handle an incoming SCO data packet: look up the connection by handle
 * and pass the payload to the SCO layer; the trailing BT_ERR is the
 * unknown-handle path.
 * NOTE(review): the handle declaration, if (conn) branching, kfree_skb
 * on the error path and returns are elided in this extract. */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	/* Send to upper protocol */
	sco_recv_scodata(conn, skb);

	BT_ERR("%s SCO packet for unknown connection handle %d",
	       hdev->name, handle);
/* RX work: drain rx_q.  Each frame is copied to the monitor socket and
 * (in promiscuous mode) to raw sockets.  In raw mode frames are
 * consumed without processing; during INIT, data packets are dropped
 * and only events are processed.  Otherwise frames are dispatched by
 * packet type to the event/ACL/SCO handlers.
 * NOTE(review): kfree_skb calls, continue statements, the HCI_EVENT_PKT
 * case label, default case and closing braces are elided here. */
static void hci_rx_work(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);

		/* Raw mode: user space owns the protocol, just drop here */
		if (test_bit(HCI_RAW, &hdev->flags)) {

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
/* Command work: if the controller has command credits (cmd_cnt), pop
 * the next queued command, keep a clone in hdev->sent_cmd so replies
 * can be matched later, send it, and arm the command timeout — unless a
 * reset is pending, which cancels the timer instead.  If the clone
 * fails, the command is requeued and the work rescheduled.
 * NOTE(review): a NULL check on the dequeued skb, the else introducing
 * mod_timer, the else of the clone failure path and closing braces are
 * elided in this extract. */
static void hci_cmd_work(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);

		/* Replace the previously remembered command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
/* Start a general inquiry using the GIAC access code after flushing the
 * inquiry cache.  @length is the inquiry duration — presumably in
 * 1.28 s units per the HCI Inquiry command; confirm against the spec.
 * Returns -EINPROGRESS when an inquiry is already running.
 * NOTE(review): the "cp.length = length;" assignment is elided in this
 * extract. */
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	/* Stale results from earlier inquiries must not survive */
	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2875 int hci_cancel_inquiry(struct hci_dev *hdev)
2877 BT_DBG("%s", hdev->name);
2879 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2882 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2885 u8 bdaddr_to_le(u8 bdaddr_type)
2887 switch (bdaddr_type) {
2888 case BDADDR_LE_PUBLIC:
2889 return ADDR_LE_DEV_PUBLIC;
2892 /* Fallback to LE Random address type */
2893 return ADDR_LE_DEV_RANDOM;