/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */
static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);
        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;
        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;
        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;
        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
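
/* Illustrative sketch (not part of the original file): a typical
 * synchronous round-trip through the helper above, assuming an opened
 * controller and the standard HCI_CMD_TIMEOUT. The returned skb holds
 * the Command Complete parameters and must be freed by the caller.
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *                           HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *
 *      rp = (struct hci_rp_read_bd_addr *) skb->data;
 *      bacpy(&hdev->bdaddr, &rp->bdaddr);
 *      kfree_skb(skb);
 */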
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;
        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;
        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
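
/* Illustrative sketch (not part of the original file): the ioctl
 * helpers further down drive simple one-command requests through
 * hci_req_sync(), e.g. enabling page and inquiry scan:
 *
 *      err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *                         HCI_INIT_TIMEOUT);
 */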
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init(req);
                break;
        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
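        /* Worked out: 0x7d00 = 32000 slots * 0.625 ms = 20000 ms = 20 s */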

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
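
/* For reference: the Write Inquiry Mode parameter follows the HCI
 * specification encoding, 0x00 = standard inquiry result, 0x01 =
 * inquiry result with RSSI, 0x02 = inquiry result with RSSI or
 * extended inquiry result.
 */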
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Only send HCI_Delete_Stored_Link_Key if it is supported */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);

        return hdev;
}
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);

        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* for unlimited number of responses we will use buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}
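
/* Illustrative sketch (not part of the original file): for an LE-only
 * controller in peripheral mode named "dev", the buffer built above
 * would contain three length-prefixed AD structures:
 *
 *      02 01 06        flags: LE_AD_GENERAL | LE_AD_NO_BREDR
 *      02 0a XX        EIR_TX_POWER, XX = adv_tx_power
 *      04 09 64 65 76  EIR_NAME_COMPLETE, "dev"
 */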
void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                /* Treat all non BR/EDR controllers as raw devices if
                 * enable_hs is not set.
                 */
                if (hdev->dev_type != HCI_BREDR && !enable_hs)
                        set_bit(HCI_RAW, &hdev->flags);

                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err < 0)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};
static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        err = hci_dev_open(hdev->id);
        if (err < 0) {
                mgmt_set_powered_failed(hdev, err);
                return;
        }

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}
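
/* Illustrative example (not part of the original file): an
 * unauthenticated combination key (0x04) created while either side
 * asked for dedicated bonding (auth requirement 0x02/0x03) is stored
 * persistently, while the same key type from a pairing where both
 * sides used no-bonding (0x00/0x01) falls through to the final
 * "don't store" case above.
 */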
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                    memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                    bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;
        bool persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (conn)
                conn->flush_key = !persistent;

        return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
                int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
                ediv, u8 rand[8])
{
        struct smp_ltk *key, *old_key;

        if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
                return 0;

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->enc_size = enc_size;
        key->type = type;
        memcpy(key->rand, rand, sizeof(key->rand));

        if (!new_key)
                return 0;

        if (type & HCI_SMP_LTK)
                mgmt_new_ltk(hdev, key, 1);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del(&key->list);
        kfree(key);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr))
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        if (hdev->sent_cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);

                BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
        } else {
                BT_ERR("%s command tx timeout", hdev->name);
        }

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list)
                if (bacmp(bdaddr, &data->bdaddr) == 0)
                        return data;

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del(&data->list);
        kfree(data);

        return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }

        return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
                            u8 *randomizer)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);

        if (!data) {
                data = kmalloc(sizeof(*data), GFP_ATOMIC);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                list_add(&data->list, &hdev->remote_oob_data);
        }

        memcpy(data->hash, hash, sizeof(data->hash));
        memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

        BT_DBG("%s for %pMR", hdev->name, bdaddr);

        return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->blacklist, list)
                if (bacmp(bdaddr, &b->bdaddr) == 0)
                        return b;

        return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->blacklist) {
                struct bdaddr_list *b;

                b = list_entry(p, struct bdaddr_list, list);

                list_del(p);
                kfree(b);
        }

        return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return -EBADF;

        if (hci_blacklist_lookup(hdev, bdaddr))
                return -EEXIST;

        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, bdaddr);

        list_add(&entry->list, &hdev->blacklist);

        return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return hci_blacklist_clear(hdev);

        entry = hci_blacklist_lookup(hdev, bdaddr);
        if (!entry)
                return -ENOENT;

        list_del(&entry->list);
        kfree(entry);

        return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void le_scan_param_req(struct hci_request *req, unsigned long opt)
{
        struct le_scan_params *param = (struct le_scan_params *) opt;
        struct hci_cp_le_set_scan_param cp;

        memset(&cp, 0, sizeof(cp));
        cp.type = param->type;
        cp.interval = cpu_to_le16(param->interval);
        cp.window = cpu_to_le16(param->window);

        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_ENABLE;
        cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
                          u16 window, int timeout)
{
        long timeo = msecs_to_jiffies(3000);
        struct le_scan_params param;
        int err;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                return -EINPROGRESS;

        param.type = type;
        param.interval = interval;
        param.window = window;

        hci_req_lock(hdev);

        err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
                             timeo);
        if (!err)
                err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

        hci_req_unlock(hdev);

        if (err < 0)
                return err;

        queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
                           timeout);

        return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                return -EALREADY;

        if (cancel_delayed_work(&hdev->le_scan_disable)) {
                struct hci_cp_le_set_scan_enable cp;

                /* Send HCI command to disable LE Scan */
                memset(&cp, 0, sizeof(cp));
                hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }

        return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        struct hci_cp_le_set_scan_enable cp;

        BT_DBG("%s", hdev->name);

        memset(&cp, 0, sizeof(cp));

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
        struct le_scan_params *param = &hdev->le_scan_params;

        BT_DBG("%s", hdev->name);

        hci_do_le_scan(hdev, param->type, param->interval, param->window,
                       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
                int timeout)
{
        struct le_scan_params *param = &hdev->le_scan_params;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                return -ENOTSUPP;

        if (work_busy(&hdev->le_scan))
                return -EINPROGRESS;

        param->type = type;
        param->interval = interval;
        param->window = window;
        param->timeout = timeout;

        queue_work(system_long_wq, &hdev->le_scan);

        return 0;
}
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->io_capability = 0x03;     /* No Input No Output */
        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_tx_power = HCI_TX_POWER_INVALID;

        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        mutex_init(&hdev->lock);
        mutex_init(&hdev->req_lock);

        INIT_LIST_HEAD(&hdev->mgmt_pending);
        INIT_LIST_HEAD(&hdev->blacklist);
        INIT_LIST_HEAD(&hdev->uuids);
        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);
        INIT_LIST_HEAD(&hdev->remote_oob_data);
        INIT_LIST_HEAD(&hdev->conn_hash.list);

        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);
        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_WORK(&hdev->le_scan, le_scan_work);

        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        init_waitqueue_head(&hdev->req_wait_q);

        setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

        hci_init_sysfs(hdev);
        discovery_init(hdev);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        int id, error;

        if (!hdev->open || !hdev->close)
                return -EINVAL;

        /* Do not allow HCI_AMP devices to register at index 0,
         * so the index can be used as the AMP controller ID.
         */
        switch (hdev->dev_type) {
        case HCI_BREDR:
                id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
                break;
        case HCI_AMP:
                id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
                break;
        default:
                return -EINVAL;
        }

        if (id < 0)
                return id;

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
                                          WQ_MEM_RECLAIM, 1);
        if (!hdev->workqueue) {
                error = -ENOMEM;
                goto err;
        }

        hdev->req_workqueue = alloc_workqueue(hdev->name,
                                              WQ_HIGHPRI | WQ_UNBOUND |
                                              WQ_MEM_RECLAIM, 1);
        if (!hdev->req_workqueue) {
                destroy_workqueue(hdev->workqueue);
                error = -ENOMEM;
                goto err;
        }

        error = hci_add_sysfs(hdev);
        if (error < 0)
                goto err_wqueue;

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
                                    hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        set_bit(HCI_SETUP, &hdev->dev_flags);

        if (hdev->dev_type != HCI_AMP)
                set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

        write_lock(&hci_dev_list_lock);
        list_add(&hdev->list, &hci_dev_list);
        write_unlock(&hci_dev_list_lock);

        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);

        queue_work(hdev->req_workqueue, &hdev->power_on);

        return id;

err_wqueue:
        destroy_workqueue(hdev->workqueue);
        destroy_workqueue(hdev->req_workqueue);
err:
        ida_simple_remove(&hci_index_ida, hdev->id);

        return error;
}
EXPORT_SYMBOL(hci_register_dev);
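
/* Illustrative sketch (not part of the original file): the minimal
 * driver-side registration sequence. The my_* callbacks are
 * hypothetical driver functions.
 *
 *      hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *
 *      hdev->bus   = HCI_USB;
 *      hdev->open  = my_open;
 *      hdev->close = my_close;
 *      hdev->send  = my_send;
 *
 *      err = hci_register_dev(hdev);
 *      if (err < 0)
 *              hci_free_dev(hdev);
 */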
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
        int i, id;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        set_bit(HCI_UNREGISTER, &hdev->dev_flags);

        id = hdev->id;

        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        cancel_work_sync(&hdev->power_on);

        if (!test_bit(HCI_INIT, &hdev->flags) &&
            !test_bit(HCI_SETUP, &hdev->dev_flags)) {
                hci_dev_lock(hdev);
                mgmt_index_removed(hdev);
                hci_dev_unlock(hdev);
        }

        /* mgmt_index_removed should take care of emptying the
         * pending mgmt commands */
        BUG_ON(!list_empty(&hdev->mgmt_pending));

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_del_sysfs(hdev);

        destroy_workqueue(hdev->workqueue);
        destroy_workqueue(hdev->req_workqueue);

        hci_dev_lock(hdev);
        hci_blacklist_clear(hdev);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_smp_ltks_clear(hdev);
        hci_remote_oob_data_clear(hdev);
        hci_dev_unlock(hdev);

        hci_dev_put(hdev);

        ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                      && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        skb_queue_tail(&hdev->rx_q, skb);
        queue_work(hdev->workqueue, &hdev->rx_work);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
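
/* Illustrative sketch (not part of the original file): a driver that
 * receives a complete event packet hands it to the core like this
 * (my_hdev is a hypothetical driver-owned device):
 *
 *      bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *      skb->dev = (void *) my_hdev;
 *      hci_recv_frame(skb);
 *
 * Drivers that only see an unframed byte stream should instead feed
 * it to hci_recv_stream_fragment() below and let the core recover the
 * packet boundaries.
 */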
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
            index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, GFP_ATOMIC);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                skb->dev = (void *) hdev;
                hdev->reassembly[index] = skb;
        }

        while (count) {
                scb = (void *) skb->cb;
                len = min_t(uint, scb->expect, count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */
                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);

                        skb = NULL;
                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        int rem = 0;

        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                rem = hci_reassembly(hdev, type, data, count, type - 1);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
        int type;
        int rem = 0;

        while (count) {
                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

                if (!skb) {
                        struct { char type; } *pkt;

                        /* Start of the frame */
                        pkt = data;
                        type = pkt->type;

                        data++;
                        count--;
                } else
                        type = bt_cb(skb)->pkt_type;

                rem = hci_reassembly(hdev, type, data, count,
                                     STREAM_REASSEMBLY);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
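
/* Illustrative sketch (not part of the original file): an upper layer
 * hooks into connection events by registering a struct hci_cb; the
 * callback names here are hypothetical.
 *
 *      static struct hci_cb my_proto_cb = {
 *              .name         = "my_proto",
 *              .security_cfm = my_security_cfm,
 *      };
 *
 *      hci_register_cb(&my_proto_cb);
 */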
static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        /* Time stamp */
        __net_timestamp(skb);

        /* Send copy to monitor */
        hci_send_to_monitor(hdev, skb);

        if (atomic_read(&hdev->promisc)) {
                /* Send copy to the sockets */
                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        bt_cb(skb)->req.complete = complete;

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
                                       u32 plen, const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
                 const void *param)
{
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        /* Stand-alone HCI commands must be flagged as
         * single-command requests.
         */
        bt_cb(skb)->req.start = true;

        skb_queue_tail(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->req.start = true;

        bt_cb(skb)->req.event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}
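
/* Illustrative sketch (not part of the original file): building and
 * running a two-command asynchronous request with the helpers above;
 * my_complete is a hypothetical hci_req_complete_t callback.
 *
 *      struct hci_request req;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *      hci_req_add(&req, HCI_OP_READ_BD_ADDR, 0, NULL);
 *      err = hci_req_run(&req, my_complete);
 */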
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
                          struct sk_buff *skb, __u16 flags)
{
        struct hci_conn *conn = chan->conn;
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        skb->len = skb_headlen(skb);
        skb->data_len = 0;

        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

        switch (hdev->dev_type) {
        case HCI_BREDR:
                hci_add_acl_hdr(skb, conn->handle, flags);
                break;
        case HCI_AMP:
                hci_add_acl_hdr(skb, chan->handle, flags);
                break;
        default:
                BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
                return;
        }

        list = skb_shinfo(skb)->frag_list;
        if (!list) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(queue, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock(&queue->lock);

                __skb_queue_tail(queue, skb);

                flags &= ~ACL_START;
                flags |= ACL_CONT;
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(queue, skb);
                } while (list);

                spin_unlock(&queue->lock);
        }
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = chan->conn->hdev;

        BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

        skb->dev = (void *) hdev;

        hci_queue_acl(chan, &chan->data_q, skb, flags);

        queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        queue_work(hdev->workqueue, &hdev->tx_work);
}
2837 /* ---- HCI TX task (outgoing data) ---- */
2839 /* HCI Connection scheduler */
2840 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2843 struct hci_conn_hash *h = &hdev->conn_hash;
2844 struct hci_conn *conn = NULL, *c;
2845 unsigned int num = 0, min = ~0;
2847 /* We don't have to lock device here. Connections are always
2848 * added and removed with TX task disabled. */
2852 list_for_each_entry_rcu(c, &h->list, list) {
2853 if (c->type != type || skb_queue_empty(&c->data_q))
2856 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2861 if (c->sent < min) {
2866 if (hci_conn_num(hdev, type) == num)
2875 switch (conn->type) {
2877 cnt = hdev->acl_cnt;
2881 cnt = hdev->sco_cnt;
2884 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2888 BT_ERR("Unknown link type");
2896 BT_DBG("conn %p quote %d", conn, *quote);
2900 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2902 struct hci_conn_hash *h = &hdev->conn_hash;
2905 BT_ERR("%s link tx timeout", hdev->name);
2909 /* Kill stalled connections */
2910 list_for_each_entry_rcu(c, &h->list, list) {
2911 if (c->type == type && c->sent) {
2912 BT_ERR("%s killing stalled connection %pMR",
2913 hdev->name, &c->dst);
2914 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;
		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;
			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
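/* Channel selection is strictly priority-first: only channels whose
 * head skb carries the highest priority seen so far remain candidates,
 * and ties between them are broken in favour of the connection with the
 * fewest in-flight packets.
 */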
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;
		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
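/* Starvation control: after a TX round, any channel that had queued
 * data but did not get to send (chan->sent == 0) has its head skb
 * promoted to HCI_PRIO_MAX - 1, so a constant stream of high-priority
 * traffic on one channel cannot block the others indefinitely.
 */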
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
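/* Worked example (values assumed for illustration): with a
 * controller-reported block_len of 339 bytes, a 1024-byte skb holds
 * 1020 bytes of payload after the 4-byte ACL header, so
 * DIV_ROUND_UP(1020, 339) charges it as 4 data blocks.
 */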
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
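/* The 40.9 s figure is the largest Link Supervision Timeout the spec
 * allows: 0xFFFF baseband slots * 0.625 ms = 40.959 s. Waiting longer
 * than that before declaring a TX timeout gives the controller every
 * chance to report the link loss itself first.
 */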
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
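/* In block mode the inner quota is spent in units of buffer blocks
 * rather than packets (quote -= blocks), so a single large PDU can
 * consume an entire scheduling quantum.
 */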
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
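/* Which scheduler runs depends on the flow control mode learned at init
 * time: packet-based controllers report per-packet buffer counts via
 * Read Buffer Size, while block-based (typically AMP) controllers
 * report a shared block pool via Read Data Block Size.
 */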
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
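/* Controllers without a dedicated LE buffer pool report zero packets in
 * LE Read Buffer Size (le_pkts == 0); LE traffic then borrows ACL
 * credits, which is why the leftover count is written back to either
 * le_cnt or acl_cnt above.
 */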
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
3345 /* SCO data packet */
3346 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3348 struct hci_sco_hdr *hdr = (void *) skb->data;
3349 struct hci_conn *conn;
3352 skb_pull(skb, HCI_SCO_HDR_SIZE);
3354 handle = __le16_to_cpu(hdr->handle);
3356 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3358 hdev->stat.sco_rx++;
3361 conn = hci_conn_hash_lookup_handle(hdev, handle);
3362 hci_dev_unlock(hdev);
3365 /* Send to upper protocol */
3366 sco_recv_scodata(conn, skb);
3369 BT_ERR("%s SCO packet for unknown connection handle %d",
3370 hdev->name, handle);
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
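/* An empty command queue, or a queued command flagged as the start of a
 * new request, both mean the current request has no commands left.
 */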
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;
			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
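/* Whether the request finished normally or one of its commands failed,
 * the remaining queued commands up to the next req.start marker belong
 * to this request and are dropped; clearing the callback pointer above
 * guarantees the completion callback runs at most once.
 */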
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
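/* All of the above runs in process context on hdev->workqueue; drivers
 * only feed hdev->rx_q (via hci_recv_frame()) and schedule this work,
 * so no per-packet processing happens in interrupt context.
 */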
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
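/* hdev->cmd_cnt normally permits a single outstanding command and is
 * replenished from the Num_HCI_Command_Packets field of Command
 * Complete/Status events. The clone kept in hdev->sent_cmd lets
 * hci_req_cmd_complete() match replies, and cmd_timer is the watchdog
 * that fires if the controller never answers.
 */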
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
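/* The GIAC LAP 0x9e8b33 is stored least-significant byte first, which
 * is why the array reads { 0x33, 0x8b, 0x9e }. The length parameter is
 * in units of 1.28 s, per the Inquiry command definition.
 */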
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}