2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
47 LIST_HEAD(hci_dev_list);
48 DEFINE_RWLOCK(hci_dev_list_lock);
50 /* HCI callback list */
51 LIST_HEAD(hci_cb_list);
52 DEFINE_RWLOCK(hci_cb_list_lock);
54 /* HCI ID Numbering */
55 static DEFINE_IDA(hci_index_ida);
57 /* ----- HCI requests ----- */
59 #define HCI_REQ_DONE 0
60 #define HCI_REQ_PEND 1
61 #define HCI_REQ_CANCELED 2
63 #define hci_req_lock(d) mutex_lock(&d->req_lock)
64 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
66 /* ---- HCI notifications ---- */
68 static void hci_notify(struct hci_dev *hdev, int event)
70 hci_sock_dev_event(hdev, event);
73 /* ---- HCI debugfs entries ---- */
75 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
78 struct hci_dev *hdev = file->private_data;
81 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 size_t count, loff_t *ppos)
90 struct hci_dev *hdev = file->private_data;
93 size_t buf_size = min(count, (sizeof(buf)-1));
97 if (!test_bit(HCI_UP, &hdev->flags))
100 if (copy_from_user(buf, user_buf, buf_size))
103 buf[buf_size] = '\0';
104 if (strtobool(buf, &enable))
107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
112 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
115 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
117 hci_req_unlock(hdev);
122 err = -bt_to_errno(skb->data[0]);
128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
133 static const struct file_operations dut_mode_fops = {
135 .read = dut_mode_read,
136 .write = dut_mode_write,
137 .llseek = default_llseek,
140 static int features_show(struct seq_file *f, void *ptr)
142 struct hci_dev *hdev = f->private;
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
161 hci_dev_unlock(hdev);
166 static int features_open(struct inode *inode, struct file *file)
168 return single_open(file, features_show, inode->i_private);
171 static const struct file_operations features_fops = {
172 .open = features_open,
175 .release = single_release,
178 static int blacklist_show(struct seq_file *f, void *p)
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
184 list_for_each_entry(b, &hdev->blacklist, list)
185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
186 hci_dev_unlock(hdev);
191 static int blacklist_open(struct inode *inode, struct file *file)
193 return single_open(file, blacklist_show, inode->i_private);
196 static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
200 .release = single_release,
203 static int uuids_show(struct seq_file *f, void *p)
205 struct hci_dev *hdev = f->private;
206 struct bt_uuid *uuid;
209 list_for_each_entry(uuid, &hdev->uuids, list) {
212 /* The Bluetooth UUID values are stored in big endian,
213 * but with reversed byte order. So convert them into
214 * the right order for the %pUb modifier.
216 for (i = 0; i < 16; i++)
217 val[i] = uuid->uuid[15 - i];
219 seq_printf(f, "%pUb\n", val);
221 hci_dev_unlock(hdev);
226 static int uuids_open(struct inode *inode, struct file *file)
228 return single_open(file, uuids_show, inode->i_private);
231 static const struct file_operations uuids_fops = {
235 .release = single_release,
238 static int inquiry_cache_show(struct seq_file *f, void *p)
240 struct hci_dev *hdev = f->private;
241 struct discovery_state *cache = &hdev->discovery;
242 struct inquiry_entry *e;
246 list_for_each_entry(e, &cache->all, all) {
247 struct inquiry_data *data = &e->data;
248 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
250 data->pscan_rep_mode, data->pscan_period_mode,
251 data->pscan_mode, data->dev_class[2],
252 data->dev_class[1], data->dev_class[0],
253 __le16_to_cpu(data->clock_offset),
254 data->rssi, data->ssp_mode, e->timestamp);
257 hci_dev_unlock(hdev);
262 static int inquiry_cache_open(struct inode *inode, struct file *file)
264 return single_open(file, inquiry_cache_show, inode->i_private);
267 static const struct file_operations inquiry_cache_fops = {
268 .open = inquiry_cache_open,
271 .release = single_release,
274 static int link_keys_show(struct seq_file *f, void *ptr)
276 struct hci_dev *hdev = f->private;
277 struct link_key *key;
280 list_for_each_entry_rcu(key, &hdev->link_keys, list)
281 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
282 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
288 static int link_keys_open(struct inode *inode, struct file *file)
290 return single_open(file, link_keys_show, inode->i_private);
293 static const struct file_operations link_keys_fops = {
294 .open = link_keys_open,
297 .release = single_release,
300 static int dev_class_show(struct seq_file *f, void *ptr)
302 struct hci_dev *hdev = f->private;
305 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
306 hdev->dev_class[1], hdev->dev_class[0]);
307 hci_dev_unlock(hdev);
312 static int dev_class_open(struct inode *inode, struct file *file)
314 return single_open(file, dev_class_show, inode->i_private);
317 static const struct file_operations dev_class_fops = {
318 .open = dev_class_open,
321 .release = single_release,
324 static int voice_setting_get(void *data, u64 *val)
326 struct hci_dev *hdev = data;
329 *val = hdev->voice_setting;
330 hci_dev_unlock(hdev);
335 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
336 NULL, "0x%4.4llx\n");
338 static int auto_accept_delay_set(void *data, u64 val)
340 struct hci_dev *hdev = data;
343 hdev->auto_accept_delay = val;
344 hci_dev_unlock(hdev);
349 static int auto_accept_delay_get(void *data, u64 *val)
351 struct hci_dev *hdev = data;
354 *val = hdev->auto_accept_delay;
355 hci_dev_unlock(hdev);
360 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
361 auto_accept_delay_set, "%llu\n");
363 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
364 size_t count, loff_t *ppos)
366 struct hci_dev *hdev = file->private_data;
369 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
372 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
375 static ssize_t force_sc_support_write(struct file *file,
376 const char __user *user_buf,
377 size_t count, loff_t *ppos)
379 struct hci_dev *hdev = file->private_data;
381 size_t buf_size = min(count, (sizeof(buf)-1));
384 if (test_bit(HCI_UP, &hdev->flags))
387 if (copy_from_user(buf, user_buf, buf_size))
390 buf[buf_size] = '\0';
391 if (strtobool(buf, &enable))
394 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
397 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
402 static const struct file_operations force_sc_support_fops = {
404 .read = force_sc_support_read,
405 .write = force_sc_support_write,
406 .llseek = default_llseek,
409 static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
410 size_t count, loff_t *ppos)
412 struct hci_dev *hdev = file->private_data;
415 buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
418 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
421 static ssize_t force_lesc_support_write(struct file *file,
422 const char __user *user_buf,
423 size_t count, loff_t *ppos)
425 struct hci_dev *hdev = file->private_data;
427 size_t buf_size = min(count, (sizeof(buf)-1));
430 if (copy_from_user(buf, user_buf, buf_size))
433 buf[buf_size] = '\0';
434 if (strtobool(buf, &enable))
437 if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
440 change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);
445 static const struct file_operations force_lesc_support_fops = {
447 .read = force_lesc_support_read,
448 .write = force_lesc_support_write,
449 .llseek = default_llseek,
452 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
453 size_t count, loff_t *ppos)
455 struct hci_dev *hdev = file->private_data;
458 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
461 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
464 static const struct file_operations sc_only_mode_fops = {
466 .read = sc_only_mode_read,
467 .llseek = default_llseek,
470 static int idle_timeout_set(void *data, u64 val)
472 struct hci_dev *hdev = data;
474 if (val != 0 && (val < 500 || val > 3600000))
478 hdev->idle_timeout = val;
479 hci_dev_unlock(hdev);
484 static int idle_timeout_get(void *data, u64 *val)
486 struct hci_dev *hdev = data;
489 *val = hdev->idle_timeout;
490 hci_dev_unlock(hdev);
495 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
496 idle_timeout_set, "%llu\n");
498 static int rpa_timeout_set(void *data, u64 val)
500 struct hci_dev *hdev = data;
502 /* Require the RPA timeout to be at least 30 seconds and at most
505 if (val < 30 || val > (60 * 60 * 24))
509 hdev->rpa_timeout = val;
510 hci_dev_unlock(hdev);
515 static int rpa_timeout_get(void *data, u64 *val)
517 struct hci_dev *hdev = data;
520 *val = hdev->rpa_timeout;
521 hci_dev_unlock(hdev);
526 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
527 rpa_timeout_set, "%llu\n");
529 static int sniff_min_interval_set(void *data, u64 val)
531 struct hci_dev *hdev = data;
533 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
537 hdev->sniff_min_interval = val;
538 hci_dev_unlock(hdev);
543 static int sniff_min_interval_get(void *data, u64 *val)
545 struct hci_dev *hdev = data;
548 *val = hdev->sniff_min_interval;
549 hci_dev_unlock(hdev);
554 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
555 sniff_min_interval_set, "%llu\n");
557 static int sniff_max_interval_set(void *data, u64 val)
559 struct hci_dev *hdev = data;
561 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
565 hdev->sniff_max_interval = val;
566 hci_dev_unlock(hdev);
571 static int sniff_max_interval_get(void *data, u64 *val)
573 struct hci_dev *hdev = data;
576 *val = hdev->sniff_max_interval;
577 hci_dev_unlock(hdev);
582 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
583 sniff_max_interval_set, "%llu\n");
585 static int conn_info_min_age_set(void *data, u64 val)
587 struct hci_dev *hdev = data;
589 if (val == 0 || val > hdev->conn_info_max_age)
593 hdev->conn_info_min_age = val;
594 hci_dev_unlock(hdev);
599 static int conn_info_min_age_get(void *data, u64 *val)
601 struct hci_dev *hdev = data;
604 *val = hdev->conn_info_min_age;
605 hci_dev_unlock(hdev);
610 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
611 conn_info_min_age_set, "%llu\n");
613 static int conn_info_max_age_set(void *data, u64 val)
615 struct hci_dev *hdev = data;
617 if (val == 0 || val < hdev->conn_info_min_age)
621 hdev->conn_info_max_age = val;
622 hci_dev_unlock(hdev);
627 static int conn_info_max_age_get(void *data, u64 *val)
629 struct hci_dev *hdev = data;
632 *val = hdev->conn_info_max_age;
633 hci_dev_unlock(hdev);
638 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
639 conn_info_max_age_set, "%llu\n");
641 static int identity_show(struct seq_file *f, void *p)
643 struct hci_dev *hdev = f->private;
649 hci_copy_identity_address(hdev, &addr, &addr_type);
651 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
652 16, hdev->irk, &hdev->rpa);
654 hci_dev_unlock(hdev);
659 static int identity_open(struct inode *inode, struct file *file)
661 return single_open(file, identity_show, inode->i_private);
664 static const struct file_operations identity_fops = {
665 .open = identity_open,
668 .release = single_release,
671 static int random_address_show(struct seq_file *f, void *p)
673 struct hci_dev *hdev = f->private;
676 seq_printf(f, "%pMR\n", &hdev->random_addr);
677 hci_dev_unlock(hdev);
682 static int random_address_open(struct inode *inode, struct file *file)
684 return single_open(file, random_address_show, inode->i_private);
687 static const struct file_operations random_address_fops = {
688 .open = random_address_open,
691 .release = single_release,
694 static int static_address_show(struct seq_file *f, void *p)
696 struct hci_dev *hdev = f->private;
699 seq_printf(f, "%pMR\n", &hdev->static_addr);
700 hci_dev_unlock(hdev);
705 static int static_address_open(struct inode *inode, struct file *file)
707 return single_open(file, static_address_show, inode->i_private);
710 static const struct file_operations static_address_fops = {
711 .open = static_address_open,
714 .release = single_release,
717 static ssize_t force_static_address_read(struct file *file,
718 char __user *user_buf,
719 size_t count, loff_t *ppos)
721 struct hci_dev *hdev = file->private_data;
724 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
727 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
730 static ssize_t force_static_address_write(struct file *file,
731 const char __user *user_buf,
732 size_t count, loff_t *ppos)
734 struct hci_dev *hdev = file->private_data;
736 size_t buf_size = min(count, (sizeof(buf)-1));
739 if (test_bit(HCI_UP, &hdev->flags))
742 if (copy_from_user(buf, user_buf, buf_size))
745 buf[buf_size] = '\0';
746 if (strtobool(buf, &enable))
749 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
752 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
757 static const struct file_operations force_static_address_fops = {
759 .read = force_static_address_read,
760 .write = force_static_address_write,
761 .llseek = default_llseek,
764 static int white_list_show(struct seq_file *f, void *ptr)
766 struct hci_dev *hdev = f->private;
767 struct bdaddr_list *b;
770 list_for_each_entry(b, &hdev->le_white_list, list)
771 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
772 hci_dev_unlock(hdev);
777 static int white_list_open(struct inode *inode, struct file *file)
779 return single_open(file, white_list_show, inode->i_private);
782 static const struct file_operations white_list_fops = {
783 .open = white_list_open,
786 .release = single_release,
789 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
791 struct hci_dev *hdev = f->private;
795 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
796 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
797 &irk->bdaddr, irk->addr_type,
798 16, irk->val, &irk->rpa);
805 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
807 return single_open(file, identity_resolving_keys_show,
811 static const struct file_operations identity_resolving_keys_fops = {
812 .open = identity_resolving_keys_open,
815 .release = single_release,
818 static int long_term_keys_show(struct seq_file *f, void *ptr)
820 struct hci_dev *hdev = f->private;
824 list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
825 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
826 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
827 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
828 __le64_to_cpu(ltk->rand), 16, ltk->val);
834 static int long_term_keys_open(struct inode *inode, struct file *file)
836 return single_open(file, long_term_keys_show, inode->i_private);
839 static const struct file_operations long_term_keys_fops = {
840 .open = long_term_keys_open,
843 .release = single_release,
846 static int conn_min_interval_set(void *data, u64 val)
848 struct hci_dev *hdev = data;
850 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
854 hdev->le_conn_min_interval = val;
855 hci_dev_unlock(hdev);
860 static int conn_min_interval_get(void *data, u64 *val)
862 struct hci_dev *hdev = data;
865 *val = hdev->le_conn_min_interval;
866 hci_dev_unlock(hdev);
871 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
872 conn_min_interval_set, "%llu\n");
874 static int conn_max_interval_set(void *data, u64 val)
876 struct hci_dev *hdev = data;
878 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
882 hdev->le_conn_max_interval = val;
883 hci_dev_unlock(hdev);
888 static int conn_max_interval_get(void *data, u64 *val)
890 struct hci_dev *hdev = data;
893 *val = hdev->le_conn_max_interval;
894 hci_dev_unlock(hdev);
899 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
900 conn_max_interval_set, "%llu\n");
902 static int conn_latency_set(void *data, u64 val)
904 struct hci_dev *hdev = data;
910 hdev->le_conn_latency = val;
911 hci_dev_unlock(hdev);
916 static int conn_latency_get(void *data, u64 *val)
918 struct hci_dev *hdev = data;
921 *val = hdev->le_conn_latency;
922 hci_dev_unlock(hdev);
927 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
928 conn_latency_set, "%llu\n");
930 static int supervision_timeout_set(void *data, u64 val)
932 struct hci_dev *hdev = data;
934 if (val < 0x000a || val > 0x0c80)
938 hdev->le_supv_timeout = val;
939 hci_dev_unlock(hdev);
944 static int supervision_timeout_get(void *data, u64 *val)
946 struct hci_dev *hdev = data;
949 *val = hdev->le_supv_timeout;
950 hci_dev_unlock(hdev);
955 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
956 supervision_timeout_set, "%llu\n");
958 static int adv_channel_map_set(void *data, u64 val)
960 struct hci_dev *hdev = data;
962 if (val < 0x01 || val > 0x07)
966 hdev->le_adv_channel_map = val;
967 hci_dev_unlock(hdev);
972 static int adv_channel_map_get(void *data, u64 *val)
974 struct hci_dev *hdev = data;
977 *val = hdev->le_adv_channel_map;
978 hci_dev_unlock(hdev);
983 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
984 adv_channel_map_set, "%llu\n");
986 static int adv_min_interval_set(void *data, u64 val)
988 struct hci_dev *hdev = data;
990 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
994 hdev->le_adv_min_interval = val;
995 hci_dev_unlock(hdev);
1000 static int adv_min_interval_get(void *data, u64 *val)
1002 struct hci_dev *hdev = data;
1005 *val = hdev->le_adv_min_interval;
1006 hci_dev_unlock(hdev);
1011 DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
1012 adv_min_interval_set, "%llu\n");
1014 static int adv_max_interval_set(void *data, u64 val)
1016 struct hci_dev *hdev = data;
1018 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
1022 hdev->le_adv_max_interval = val;
1023 hci_dev_unlock(hdev);
1028 static int adv_max_interval_get(void *data, u64 *val)
1030 struct hci_dev *hdev = data;
1033 *val = hdev->le_adv_max_interval;
1034 hci_dev_unlock(hdev);
1039 DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1040 adv_max_interval_set, "%llu\n");
1042 static int device_list_show(struct seq_file *f, void *ptr)
1044 struct hci_dev *hdev = f->private;
1045 struct hci_conn_params *p;
1046 struct bdaddr_list *b;
1049 list_for_each_entry(b, &hdev->whitelist, list)
1050 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
1051 list_for_each_entry(p, &hdev->le_conn_params, list) {
1052 seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
1055 hci_dev_unlock(hdev);
1060 static int device_list_open(struct inode *inode, struct file *file)
1062 return single_open(file, device_list_show, inode->i_private);
1065 static const struct file_operations device_list_fops = {
1066 .open = device_list_open,
1068 .llseek = seq_lseek,
1069 .release = single_release,
1072 /* ---- HCI requests ---- */
1074 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1076 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1078 if (hdev->req_status == HCI_REQ_PEND) {
1079 hdev->req_result = result;
1080 hdev->req_status = HCI_REQ_DONE;
1081 wake_up_interruptible(&hdev->req_wait_q);
1085 static void hci_req_cancel(struct hci_dev *hdev, int err)
1087 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1089 if (hdev->req_status == HCI_REQ_PEND) {
1090 hdev->req_result = err;
1091 hdev->req_status = HCI_REQ_CANCELED;
1092 wake_up_interruptible(&hdev->req_wait_q);
1096 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1099 struct hci_ev_cmd_complete *ev;
1100 struct hci_event_hdr *hdr;
1101 struct sk_buff *skb;
1105 skb = hdev->recv_evt;
1106 hdev->recv_evt = NULL;
1108 hci_dev_unlock(hdev);
1111 return ERR_PTR(-ENODATA);
1113 if (skb->len < sizeof(*hdr)) {
1114 BT_ERR("Too short HCI event");
1118 hdr = (void *) skb->data;
1119 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1122 if (hdr->evt != event)
1127 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1128 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1132 if (skb->len < sizeof(*ev)) {
1133 BT_ERR("Too short cmd_complete event");
1137 ev = (void *) skb->data;
1138 skb_pull(skb, sizeof(*ev));
1140 if (opcode == __le16_to_cpu(ev->opcode))
1143 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1144 __le16_to_cpu(ev->opcode));
1148 return ERR_PTR(-ENODATA);
1151 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1152 const void *param, u8 event, u32 timeout)
1154 DECLARE_WAITQUEUE(wait, current);
1155 struct hci_request req;
1158 BT_DBG("%s", hdev->name);
1160 hci_req_init(&req, hdev);
1162 hci_req_add_ev(&req, opcode, plen, param, event);
1164 hdev->req_status = HCI_REQ_PEND;
1166 add_wait_queue(&hdev->req_wait_q, &wait);
1167 set_current_state(TASK_INTERRUPTIBLE);
1169 err = hci_req_run(&req, hci_req_sync_complete);
1171 remove_wait_queue(&hdev->req_wait_q, &wait);
1172 set_current_state(TASK_RUNNING);
1173 return ERR_PTR(err);
1176 schedule_timeout(timeout);
1178 remove_wait_queue(&hdev->req_wait_q, &wait);
1180 if (signal_pending(current))
1181 return ERR_PTR(-EINTR);
1183 switch (hdev->req_status) {
1185 err = -bt_to_errno(hdev->req_result);
1188 case HCI_REQ_CANCELED:
1189 err = -hdev->req_result;
1197 hdev->req_status = hdev->req_result = 0;
1199 BT_DBG("%s end: err %d", hdev->name, err);
1202 return ERR_PTR(err);
1204 return hci_get_cmd_complete(hdev, opcode, event);
1206 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1208 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1209 const void *param, u32 timeout)
1211 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1213 EXPORT_SYMBOL(__hci_cmd_sync);
1215 /* Execute request and wait for completion. */
1216 static int __hci_req_sync(struct hci_dev *hdev,
1217 void (*func)(struct hci_request *req,
1219 unsigned long opt, __u32 timeout)
1221 struct hci_request req;
1222 DECLARE_WAITQUEUE(wait, current);
1225 BT_DBG("%s start", hdev->name);
1227 hci_req_init(&req, hdev);
1229 hdev->req_status = HCI_REQ_PEND;
1233 add_wait_queue(&hdev->req_wait_q, &wait);
1234 set_current_state(TASK_INTERRUPTIBLE);
1236 err = hci_req_run(&req, hci_req_sync_complete);
1238 hdev->req_status = 0;
1240 remove_wait_queue(&hdev->req_wait_q, &wait);
1241 set_current_state(TASK_RUNNING);
1243 /* ENODATA means the HCI request command queue is empty.
1244 * This can happen when a request with conditionals doesn't
1245 * trigger any commands to be sent. This is normal behavior
1246 * and should not trigger an error return.
1248 if (err == -ENODATA)
1254 schedule_timeout(timeout);
1256 remove_wait_queue(&hdev->req_wait_q, &wait);
1258 if (signal_pending(current))
1261 switch (hdev->req_status) {
1263 err = -bt_to_errno(hdev->req_result);
1266 case HCI_REQ_CANCELED:
1267 err = -hdev->req_result;
1275 hdev->req_status = hdev->req_result = 0;
1277 BT_DBG("%s end: err %d", hdev->name, err);
1282 static int hci_req_sync(struct hci_dev *hdev,
1283 void (*req)(struct hci_request *req,
1285 unsigned long opt, __u32 timeout)
1289 if (!test_bit(HCI_UP, &hdev->flags))
1292 /* Serialize all requests */
1294 ret = __hci_req_sync(hdev, req, opt, timeout);
1295 hci_req_unlock(hdev);
1300 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1302 BT_DBG("%s %ld", req->hdev->name, opt);
1305 set_bit(HCI_RESET, &req->hdev->flags);
1306 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1309 static void bredr_init(struct hci_request *req)
1311 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1313 /* Read Local Supported Features */
1314 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1316 /* Read Local Version */
1317 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1319 /* Read BD Address */
1320 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1323 static void amp_init(struct hci_request *req)
1325 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1327 /* Read Local Version */
1328 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1330 /* Read Local Supported Commands */
1331 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1333 /* Read Local Supported Features */
1334 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1336 /* Read Local AMP Info */
1337 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1339 /* Read Data Blk size */
1340 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1342 /* Read Flow Control Mode */
1343 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1345 /* Read Location Data */
1346 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1349 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1351 struct hci_dev *hdev = req->hdev;
1353 BT_DBG("%s %ld", hdev->name, opt);
1356 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1357 hci_reset_req(req, 0);
1359 switch (hdev->dev_type) {
1369 BT_ERR("Unknown device type %d", hdev->dev_type);
1374 static void bredr_setup(struct hci_request *req)
1376 struct hci_dev *hdev = req->hdev;
1381 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1382 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1384 /* Read Class of Device */
1385 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1387 /* Read Local Name */
1388 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1390 /* Read Voice Setting */
1391 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1393 /* Read Number of Supported IAC */
1394 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1396 /* Read Current IAC LAP */
1397 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1399 /* Clear Event Filters */
1400 flt_type = HCI_FLT_CLEAR_ALL;
1401 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1403 /* Connection accept timeout ~20 secs */
1404 param = cpu_to_le16(0x7d00);
1405 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1407 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1408 * but it does not support page scan related HCI commands.
1410 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1411 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1412 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1416 static void le_setup(struct hci_request *req)
1418 struct hci_dev *hdev = req->hdev;
1420 /* Read LE Buffer Size */
1421 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1423 /* Read LE Local Supported Features */
1424 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1426 /* Read LE Supported States */
1427 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1429 /* Read LE White List Size */
1430 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1432 /* Clear LE White List */
1433 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1435 /* LE-only controllers have LE implicitly enabled */
1436 if (!lmp_bredr_capable(hdev))
1437 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1440 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1442 if (lmp_ext_inq_capable(hdev))
1445 if (lmp_inq_rssi_capable(hdev))
1448 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1449 hdev->lmp_subver == 0x0757)
1452 if (hdev->manufacturer == 15) {
1453 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1455 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1457 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1461 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1462 hdev->lmp_subver == 0x1805)
1468 static void hci_setup_inquiry_mode(struct hci_request *req)
1472 mode = hci_get_inquiry_mode(req->hdev);
1474 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1477 static void hci_setup_event_mask(struct hci_request *req)
1479 struct hci_dev *hdev = req->hdev;
1481 /* The second byte is 0xff instead of 0x9f (two reserved bits
1482 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1483 * command otherwise.
1485 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1487 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1488 * any event mask for pre 1.2 devices.
1490 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1493 if (lmp_bredr_capable(hdev)) {
1494 events[4] |= 0x01; /* Flow Specification Complete */
1495 events[4] |= 0x02; /* Inquiry Result with RSSI */
1496 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1497 events[5] |= 0x08; /* Synchronous Connection Complete */
1498 events[5] |= 0x10; /* Synchronous Connection Changed */
1500 /* Use a different default for LE-only devices */
1501 memset(events, 0, sizeof(events));
1502 events[0] |= 0x10; /* Disconnection Complete */
1503 events[1] |= 0x08; /* Read Remote Version Information Complete */
1504 events[1] |= 0x20; /* Command Complete */
1505 events[1] |= 0x40; /* Command Status */
1506 events[1] |= 0x80; /* Hardware Error */
1507 events[2] |= 0x04; /* Number of Completed Packets */
1508 events[3] |= 0x02; /* Data Buffer Overflow */
1510 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1511 events[0] |= 0x80; /* Encryption Change */
1512 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1516 if (lmp_inq_rssi_capable(hdev))
1517 events[4] |= 0x02; /* Inquiry Result with RSSI */
1519 if (lmp_sniffsubr_capable(hdev))
1520 events[5] |= 0x20; /* Sniff Subrating */
1522 if (lmp_pause_enc_capable(hdev))
1523 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1525 if (lmp_ext_inq_capable(hdev))
1526 events[5] |= 0x40; /* Extended Inquiry Result */
1528 if (lmp_no_flush_capable(hdev))
1529 events[7] |= 0x01; /* Enhanced Flush Complete */
1531 if (lmp_lsto_capable(hdev))
1532 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1534 if (lmp_ssp_capable(hdev)) {
1535 events[6] |= 0x01; /* IO Capability Request */
1536 events[6] |= 0x02; /* IO Capability Response */
1537 events[6] |= 0x04; /* User Confirmation Request */
1538 events[6] |= 0x08; /* User Passkey Request */
1539 events[6] |= 0x10; /* Remote OOB Data Request */
1540 events[6] |= 0x20; /* Simple Pairing Complete */
1541 events[7] |= 0x04; /* User Passkey Notification */
1542 events[7] |= 0x08; /* Keypress Notification */
1543 events[7] |= 0x10; /* Remote Host Supported
1544 * Features Notification
1548 if (lmp_le_capable(hdev))
1549 events[7] |= 0x20; /* LE Meta-Event */
1551 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1554 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1556 struct hci_dev *hdev = req->hdev;
1558 if (lmp_bredr_capable(hdev))
1561 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1563 if (lmp_le_capable(hdev))
1566 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1567 * local supported commands HCI command.
1569 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1570 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1572 if (lmp_ssp_capable(hdev)) {
1573 /* When SSP is available, then the host features page
1574 * should also be available as well. However some
1575 * controllers list the max_page as 0 as long as SSP
1576 * has not been enabled. To achieve proper debugging
1577 * output, force the minimum max_page to 1 at least.
1579 hdev->max_page = 0x01;
1581 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1583 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1584 sizeof(mode), &mode);
1586 struct hci_cp_write_eir cp;
1588 memset(hdev->eir, 0, sizeof(hdev->eir));
1589 memset(&cp, 0, sizeof(cp));
1591 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1595 if (lmp_inq_rssi_capable(hdev))
1596 hci_setup_inquiry_mode(req);
1598 if (lmp_inq_tx_pwr_capable(hdev))
1599 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1601 if (lmp_ext_feat_capable(hdev)) {
1602 struct hci_cp_read_local_ext_features cp;
1605 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1609 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1611 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1616 static void hci_setup_link_policy(struct hci_request *req)
1618 struct hci_dev *hdev = req->hdev;
1619 struct hci_cp_write_def_link_policy cp;
1620 u16 link_policy = 0;
1622 if (lmp_rswitch_capable(hdev))
1623 link_policy |= HCI_LP_RSWITCH;
1624 if (lmp_hold_capable(hdev))
1625 link_policy |= HCI_LP_HOLD;
1626 if (lmp_sniff_capable(hdev))
1627 link_policy |= HCI_LP_SNIFF;
1628 if (lmp_park_capable(hdev))
1629 link_policy |= HCI_LP_PARK;
1631 cp.policy = cpu_to_le16(link_policy);
1632 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1635 static void hci_set_le_support(struct hci_request *req)
1637 struct hci_dev *hdev = req->hdev;
1638 struct hci_cp_write_le_host_supported cp;
1640 /* LE-only devices do not support explicit enablement */
1641 if (!lmp_bredr_capable(hdev))
1644 memset(&cp, 0, sizeof(cp));
1646 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1651 if (cp.le != lmp_host_le_capable(hdev))
1652 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1656 static void hci_set_event_mask_page_2(struct hci_request *req)
1658 struct hci_dev *hdev = req->hdev;
1659 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1661 /* If Connectionless Slave Broadcast master role is supported
1662 * enable all necessary events for it.
1664 if (lmp_csb_master_capable(hdev)) {
1665 events[1] |= 0x40; /* Triggered Clock Capture */
1666 events[1] |= 0x80; /* Synchronization Train Complete */
1667 events[2] |= 0x10; /* Slave Page Response Timeout */
1668 events[2] |= 0x20; /* CSB Channel Map Change */
1671 /* If Connectionless Slave Broadcast slave role is supported
1672 * enable all necessary events for it.
1674 if (lmp_csb_slave_capable(hdev)) {
1675 events[2] |= 0x01; /* Synchronization Train Received */
1676 events[2] |= 0x02; /* CSB Receive */
1677 events[2] |= 0x04; /* CSB Timeout */
1678 events[2] |= 0x08; /* Truncated Page Complete */
1681 /* Enable Authenticated Payload Timeout Expired event if supported */
1682 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1685 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1688 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1690 struct hci_dev *hdev = req->hdev;
1693 hci_setup_event_mask(req);
1695 /* Some Broadcom based Bluetooth controllers do not support the
1696 * Delete Stored Link Key command. They are clearly indicating its
1697 * absence in the bit mask of supported commands.
1699 * Check the supported commands and only if the the command is marked
1700 * as supported send it. If not supported assume that the controller
1701 * does not have actual support for stored link keys which makes this
1702 * command redundant anyway.
1704 * Some controllers indicate that they support handling deleting
1705 * stored link keys, but they don't. The quirk lets a driver
1706 * just disable this command.
1708 if (hdev->commands[6] & 0x80 &&
1709 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1710 struct hci_cp_delete_stored_link_key cp;
1712 bacpy(&cp.bdaddr, BDADDR_ANY);
1713 cp.delete_all = 0x01;
1714 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1718 if (hdev->commands[5] & 0x10)
1719 hci_setup_link_policy(req);
1721 if (lmp_le_capable(hdev)) {
1724 memset(events, 0, sizeof(events));
1727 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1728 events[0] |= 0x10; /* LE Long Term Key Request */
1730 /* If controller supports the Connection Parameters Request
1731 * Link Layer Procedure, enable the corresponding event.
1733 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1734 events[0] |= 0x20; /* LE Remote Connection
1738 /* If the controller supports Extended Scanner Filter
1739 * Policies, enable the correspondig event.
1741 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
1742 events[1] |= 0x04; /* LE Direct Advertising
1746 /* If the controller supports the LE Read Local P-256
1747 * Public Key command, enable the corresponding event.
1749 if (hdev->commands[34] & 0x02)
1750 events[0] |= 0x80; /* LE Read Local P-256
1751 * Public Key Complete
1754 /* If the controller supports the LE Generate DHKey
1755 * command, enable the corresponding event.
1757 if (hdev->commands[34] & 0x04)
1758 events[1] |= 0x01; /* LE Generate DHKey Complete */
1760 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1763 if (hdev->commands[25] & 0x40) {
1764 /* Read LE Advertising Channel TX Power */
1765 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1768 hci_set_le_support(req);
1771 /* Read features beyond page 1 if available */
1772 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1773 struct hci_cp_read_local_ext_features cp;
1776 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1781 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1783 struct hci_dev *hdev = req->hdev;
1785 /* Set event mask page 2 if the HCI command for it is supported */
1786 if (hdev->commands[22] & 0x04)
1787 hci_set_event_mask_page_2(req);
1789 /* Read local codec list if the HCI command is supported */
1790 if (hdev->commands[29] & 0x20)
1791 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1793 /* Get MWS transport configuration if the HCI command is supported */
1794 if (hdev->commands[30] & 0x08)
1795 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1797 /* Check for Synchronization Train support */
1798 if (lmp_sync_train_capable(hdev))
1799 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1801 /* Enable Secure Connections if supported and configured */
1802 if (bredr_sc_enabled(hdev)) {
1804 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1805 sizeof(support), &support);
1809 static int __hci_init(struct hci_dev *hdev)
1813 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1817 /* The Device Under Test (DUT) mode is special and available for
1818 * all controller types. So just create it early on.
1820 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1821 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1825 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1826 * BR/EDR/LE type controllers. AMP controllers only need the
1829 if (hdev->dev_type != HCI_BREDR)
1832 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1836 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1840 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1844 /* Only create debugfs entries during the initial setup
1845 * phase and not every time the controller gets powered on.
1847 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1850 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1852 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1853 &hdev->manufacturer);
1854 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1855 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1856 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1858 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1860 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1862 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1863 &conn_info_min_age_fops);
1864 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1865 &conn_info_max_age_fops);
1867 if (lmp_bredr_capable(hdev)) {
1868 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1869 hdev, &inquiry_cache_fops);
1870 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1871 hdev, &link_keys_fops);
1872 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1873 hdev, &dev_class_fops);
1874 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1875 hdev, &voice_setting_fops);
1878 if (lmp_ssp_capable(hdev)) {
1879 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1880 hdev, &auto_accept_delay_fops);
1881 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1882 hdev, &force_sc_support_fops);
1883 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1884 hdev, &sc_only_mode_fops);
1885 if (lmp_le_capable(hdev))
1886 debugfs_create_file("force_lesc_support", 0644,
1887 hdev->debugfs, hdev,
1888 &force_lesc_support_fops);
1891 if (lmp_sniff_capable(hdev)) {
1892 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1893 hdev, &idle_timeout_fops);
1894 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1895 hdev, &sniff_min_interval_fops);
1896 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1897 hdev, &sniff_max_interval_fops);
1900 if (lmp_le_capable(hdev)) {
1901 debugfs_create_file("identity", 0400, hdev->debugfs,
1902 hdev, &identity_fops);
1903 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1904 hdev, &rpa_timeout_fops);
1905 debugfs_create_file("random_address", 0444, hdev->debugfs,
1906 hdev, &random_address_fops);
1907 debugfs_create_file("static_address", 0444, hdev->debugfs,
1908 hdev, &static_address_fops);
1910 /* For controllers with a public address, provide a debug
1911 * option to force the usage of the configured static
1912 * address. By default the public address is used.
1914 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1915 debugfs_create_file("force_static_address", 0644,
1916 hdev->debugfs, hdev,
1917 &force_static_address_fops);
1919 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1920 &hdev->le_white_list_size);
1921 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1923 debugfs_create_file("identity_resolving_keys", 0400,
1924 hdev->debugfs, hdev,
1925 &identity_resolving_keys_fops);
1926 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1927 hdev, &long_term_keys_fops);
1928 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1929 hdev, &conn_min_interval_fops);
1930 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1931 hdev, &conn_max_interval_fops);
1932 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1933 hdev, &conn_latency_fops);
1934 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1935 hdev, &supervision_timeout_fops);
1936 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1937 hdev, &adv_channel_map_fops);
1938 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1939 hdev, &adv_min_interval_fops);
1940 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1941 hdev, &adv_max_interval_fops);
1942 debugfs_create_u16("discov_interleaved_timeout", 0644,
1944 &hdev->discov_interleaved_timeout);
1952 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1954 struct hci_dev *hdev = req->hdev;
1956 BT_DBG("%s %ld", hdev->name, opt);
1959 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1960 hci_reset_req(req, 0);
1962 /* Read Local Version */
1963 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1965 /* Read BD Address */
1966 if (hdev->set_bdaddr)
1967 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1970 static int __hci_unconf_init(struct hci_dev *hdev)
1974 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1977 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1984 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1988 BT_DBG("%s %x", req->hdev->name, scan);
1990 /* Inquiry and Page scans */
1991 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1994 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1998 BT_DBG("%s %x", req->hdev->name, auth);
2000 /* Authentication */
2001 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
2004 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
2008 BT_DBG("%s %x", req->hdev->name, encrypt);
2011 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
2014 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
2016 __le16 policy = cpu_to_le16(opt);
2018 BT_DBG("%s %x", req->hdev->name, policy);
2020 /* Default link policy */
2021 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
2024 /* Get HCI device by index.
2025 * Device is held on return. */
2026 struct hci_dev *hci_dev_get(int index)
2028 struct hci_dev *hdev = NULL, *d;
2030 BT_DBG("%d", index);
2035 read_lock(&hci_dev_list_lock);
2036 list_for_each_entry(d, &hci_dev_list, list) {
2037 if (d->id == index) {
2038 hdev = hci_dev_hold(d);
2042 read_unlock(&hci_dev_list_lock);
2046 /* ---- Inquiry support ---- */
2048 bool hci_discovery_active(struct hci_dev *hdev)
2050 struct discovery_state *discov = &hdev->discovery;
2052 switch (discov->state) {
2053 case DISCOVERY_FINDING:
2054 case DISCOVERY_RESOLVING:
2062 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2064 int old_state = hdev->discovery.state;
2066 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2068 if (old_state == state)
2071 hdev->discovery.state = state;
2074 case DISCOVERY_STOPPED:
2075 hci_update_background_scan(hdev);
2077 if (old_state != DISCOVERY_STARTING)
2078 mgmt_discovering(hdev, 0);
2080 case DISCOVERY_STARTING:
2082 case DISCOVERY_FINDING:
2083 mgmt_discovering(hdev, 1);
2085 case DISCOVERY_RESOLVING:
2087 case DISCOVERY_STOPPING:
2092 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2094 struct discovery_state *cache = &hdev->discovery;
2095 struct inquiry_entry *p, *n;
2097 list_for_each_entry_safe(p, n, &cache->all, all) {
2102 INIT_LIST_HEAD(&cache->unknown);
2103 INIT_LIST_HEAD(&cache->resolve);
2106 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2109 struct discovery_state *cache = &hdev->discovery;
2110 struct inquiry_entry *e;
2112 BT_DBG("cache %p, %pMR", cache, bdaddr);
2114 list_for_each_entry(e, &cache->all, all) {
2115 if (!bacmp(&e->data.bdaddr, bdaddr))
2122 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2125 struct discovery_state *cache = &hdev->discovery;
2126 struct inquiry_entry *e;
2128 BT_DBG("cache %p, %pMR", cache, bdaddr);
2130 list_for_each_entry(e, &cache->unknown, list) {
2131 if (!bacmp(&e->data.bdaddr, bdaddr))
2138 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2142 struct discovery_state *cache = &hdev->discovery;
2143 struct inquiry_entry *e;
2145 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2147 list_for_each_entry(e, &cache->resolve, list) {
2148 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2150 if (!bacmp(&e->data.bdaddr, bdaddr))
2157 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2158 struct inquiry_entry *ie)
2160 struct discovery_state *cache = &hdev->discovery;
2161 struct list_head *pos = &cache->resolve;
2162 struct inquiry_entry *p;
2164 list_del(&ie->list);
2166 list_for_each_entry(p, &cache->resolve, list) {
2167 if (p->name_state != NAME_PENDING &&
2168 abs(p->data.rssi) >= abs(ie->data.rssi))
2173 list_add(&ie->list, pos);
2176 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2179 struct discovery_state *cache = &hdev->discovery;
2180 struct inquiry_entry *ie;
2183 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2185 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
2187 if (!data->ssp_mode)
2188 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2190 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2192 if (!ie->data.ssp_mode)
2193 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2195 if (ie->name_state == NAME_NEEDED &&
2196 data->rssi != ie->data.rssi) {
2197 ie->data.rssi = data->rssi;
2198 hci_inquiry_cache_update_resolve(hdev, ie);
2204 /* Entry not in the cache. Add new one. */
2205 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2207 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2211 list_add(&ie->all, &cache->all);
2214 ie->name_state = NAME_KNOWN;
2216 ie->name_state = NAME_NOT_KNOWN;
2217 list_add(&ie->list, &cache->unknown);
2221 if (name_known && ie->name_state != NAME_KNOWN &&
2222 ie->name_state != NAME_PENDING) {
2223 ie->name_state = NAME_KNOWN;
2224 list_del(&ie->list);
2227 memcpy(&ie->data, data, sizeof(*data));
2228 ie->timestamp = jiffies;
2229 cache->timestamp = jiffies;
2231 if (ie->name_state == NAME_NOT_KNOWN)
2232 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2238 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2240 struct discovery_state *cache = &hdev->discovery;
2241 struct inquiry_info *info = (struct inquiry_info *) buf;
2242 struct inquiry_entry *e;
2245 list_for_each_entry(e, &cache->all, all) {
2246 struct inquiry_data *data = &e->data;
2251 bacpy(&info->bdaddr, &data->bdaddr);
2252 info->pscan_rep_mode = data->pscan_rep_mode;
2253 info->pscan_period_mode = data->pscan_period_mode;
2254 info->pscan_mode = data->pscan_mode;
2255 memcpy(info->dev_class, data->dev_class, 3);
2256 info->clock_offset = data->clock_offset;
2262 BT_DBG("cache %p, copied %d", cache, copied);
2266 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2268 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2269 struct hci_dev *hdev = req->hdev;
2270 struct hci_cp_inquiry cp;
2272 BT_DBG("%s", hdev->name);
2274 if (test_bit(HCI_INQUIRY, &hdev->flags))
2278 memcpy(&cp.lap, &ir->lap, 3);
2279 cp.length = ir->length;
2280 cp.num_rsp = ir->num_rsp;
2281 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2284 int hci_inquiry(void __user *arg)
2286 __u8 __user *ptr = arg;
2287 struct hci_inquiry_req ir;
2288 struct hci_dev *hdev;
2289 int err = 0, do_inquiry = 0, max_rsp;
2293 if (copy_from_user(&ir, ptr, sizeof(ir)))
2296 hdev = hci_dev_get(ir.dev_id);
2300 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2305 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2310 if (hdev->dev_type != HCI_BREDR) {
2315 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2321 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2322 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2323 hci_inquiry_cache_flush(hdev);
2326 hci_dev_unlock(hdev);
2328 timeo = ir.length * msecs_to_jiffies(2000);
2331 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2336 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2337 * cleared). If it is interrupted by a signal, return -EINTR.
2339 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2340 TASK_INTERRUPTIBLE))
2344 /* for unlimited number of responses we will use buffer with
2347 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2349 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2350 * copy it to the user space.
2352 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2359 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2360 hci_dev_unlock(hdev);
2362 BT_DBG("num_rsp %d", ir.num_rsp);
2364 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2366 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2379 static int hci_dev_do_open(struct hci_dev *hdev)
2383 BT_DBG("%s %p", hdev->name, hdev);
2387 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2392 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2393 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2394 /* Check for rfkill but allow the HCI setup stage to
2395 * proceed (which in itself doesn't cause any RF activity).
2397 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2402 /* Check for valid public address or a configured static
2403 * random adddress, but let the HCI setup proceed to
2404 * be able to determine if there is a public address
2407 * In case of user channel usage, it is not important
2408 * if a public address or static random address is
2411 * This check is only valid for BR/EDR controllers
2412 * since AMP controllers do not have an address.
2414 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2415 hdev->dev_type == HCI_BREDR &&
2416 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2417 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2418 ret = -EADDRNOTAVAIL;
2423 if (test_bit(HCI_UP, &hdev->flags)) {
2428 if (hdev->open(hdev)) {
2433 atomic_set(&hdev->cmd_cnt, 1);
2434 set_bit(HCI_INIT, &hdev->flags);
2436 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2438 ret = hdev->setup(hdev);
2440 /* The transport driver can set these quirks before
2441 * creating the HCI device or in its setup callback.
2443 * In case any of them is set, the controller has to
2444 * start up as unconfigured.
2446 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2447 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2448 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2450 /* For an unconfigured controller it is required to
2451 * read at least the version information provided by
2452 * the Read Local Version Information command.
2454 * If the set_bdaddr driver callback is provided, then
2455 * also the original Bluetooth public device address
2456 * will be read using the Read BD Address command.
2458 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2459 ret = __hci_unconf_init(hdev);
2462 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2463 /* If public address change is configured, ensure that
2464 * the address gets programmed. If the driver does not
2465 * support changing the public address, fail the power
2468 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2470 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2472 ret = -EADDRNOTAVAIL;
2476 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2477 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2478 ret = __hci_init(hdev);
2481 clear_bit(HCI_INIT, &hdev->flags);
2485 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2486 set_bit(HCI_UP, &hdev->flags);
2487 hci_notify(hdev, HCI_DEV_UP);
2488 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2489 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2490 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2491 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2492 hdev->dev_type == HCI_BREDR) {
2494 mgmt_powered(hdev, 1);
2495 hci_dev_unlock(hdev);
2498 /* Init failed, cleanup */
2499 flush_work(&hdev->tx_work);
2500 flush_work(&hdev->cmd_work);
2501 flush_work(&hdev->rx_work);
2503 skb_queue_purge(&hdev->cmd_q);
2504 skb_queue_purge(&hdev->rx_q);
2509 if (hdev->sent_cmd) {
2510 kfree_skb(hdev->sent_cmd);
2511 hdev->sent_cmd = NULL;
2515 hdev->flags &= BIT(HCI_RAW);
2519 hci_req_unlock(hdev);
2523 /* ---- HCI ioctl helpers ---- */
2525 int hci_dev_open(__u16 dev)
2527 struct hci_dev *hdev;
2530 hdev = hci_dev_get(dev);
2534 /* Devices that are marked as unconfigured can only be powered
2535 * up as user channel. Trying to bring them up as normal devices
2536 * will result into a failure. Only user channel operation is
2539 * When this function is called for a user channel, the flag
2540 * HCI_USER_CHANNEL will be set first before attempting to
2543 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2544 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2549 /* We need to ensure that no other power on/off work is pending
2550 * before proceeding to call hci_dev_do_open. This is
2551 * particularly important if the setup procedure has not yet
2554 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2555 cancel_delayed_work(&hdev->power_off);
2557 /* After this call it is guaranteed that the setup procedure
2558 * has finished. This means that error conditions like RFKILL
2559 * or no valid public or static random address apply.
2561 flush_workqueue(hdev->req_workqueue);
2563 /* For controllers not using the management interface and that
2564 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2565 * so that pairing works for them. Once the management interface
2566 * is in use this bit will be cleared again and userspace has
2567 * to explicitly enable it.
2569 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2570 !test_bit(HCI_MGMT, &hdev->dev_flags))
2571 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2573 err = hci_dev_do_open(hdev);
2580 /* This function requires the caller holds hdev->lock */
2581 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2583 struct hci_conn_params *p;
2585 list_for_each_entry(p, &hdev->le_conn_params, list) {
2587 hci_conn_drop(p->conn);
2588 hci_conn_put(p->conn);
2591 list_del_init(&p->action);
2594 BT_DBG("All LE pending actions cleared");
2597 static int hci_dev_do_close(struct hci_dev *hdev)
2599 BT_DBG("%s %p", hdev->name, hdev);
2601 cancel_delayed_work(&hdev->power_off);
2603 hci_req_cancel(hdev, ENODEV);
2606 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2607 cancel_delayed_work_sync(&hdev->cmd_timer);
2608 hci_req_unlock(hdev);
2612 /* Flush RX and TX works */
2613 flush_work(&hdev->tx_work);
2614 flush_work(&hdev->rx_work);
2616 if (hdev->discov_timeout > 0) {
2617 cancel_delayed_work(&hdev->discov_off);
2618 hdev->discov_timeout = 0;
2619 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2620 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2623 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2624 cancel_delayed_work(&hdev->service_cache);
2626 cancel_delayed_work_sync(&hdev->le_scan_disable);
2628 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2629 cancel_delayed_work_sync(&hdev->rpa_expired);
2631 /* Avoid potential lockdep warnings from the *_flush() calls by
2632 * ensuring the workqueue is empty up front.
2634 drain_workqueue(hdev->workqueue);
2637 hci_inquiry_cache_flush(hdev);
2638 hci_pend_le_actions_clear(hdev);
2639 hci_conn_hash_flush(hdev);
2640 hci_dev_unlock(hdev);
2642 hci_notify(hdev, HCI_DEV_DOWN);
2648 skb_queue_purge(&hdev->cmd_q);
2649 atomic_set(&hdev->cmd_cnt, 1);
2650 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2651 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2652 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2653 set_bit(HCI_INIT, &hdev->flags);
2654 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2655 clear_bit(HCI_INIT, &hdev->flags);
2658 /* flush cmd work */
2659 flush_work(&hdev->cmd_work);
2662 skb_queue_purge(&hdev->rx_q);
2663 skb_queue_purge(&hdev->cmd_q);
2664 skb_queue_purge(&hdev->raw_q);
2666 /* Drop last sent command */
2667 if (hdev->sent_cmd) {
2668 cancel_delayed_work_sync(&hdev->cmd_timer);
2669 kfree_skb(hdev->sent_cmd);
2670 hdev->sent_cmd = NULL;
2673 kfree_skb(hdev->recv_evt);
2674 hdev->recv_evt = NULL;
2676 /* After this point our queues are empty
2677 * and no tasks are scheduled. */
2681 hdev->flags &= BIT(HCI_RAW);
2682 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2684 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2685 if (hdev->dev_type == HCI_BREDR) {
2687 mgmt_powered(hdev, 0);
2688 hci_dev_unlock(hdev);
2692 /* Controller radio is available but is currently powered down */
2693 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2695 memset(hdev->eir, 0, sizeof(hdev->eir));
2696 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2697 bacpy(&hdev->random_addr, BDADDR_ANY);
2699 hci_req_unlock(hdev);
2705 int hci_dev_close(__u16 dev)
2707 struct hci_dev *hdev;
2710 hdev = hci_dev_get(dev);
2714 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2719 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2720 cancel_delayed_work(&hdev->power_off);
2722 err = hci_dev_do_close(hdev);
2729 int hci_dev_reset(__u16 dev)
2731 struct hci_dev *hdev;
2734 hdev = hci_dev_get(dev);
2740 if (!test_bit(HCI_UP, &hdev->flags)) {
2745 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2750 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2756 skb_queue_purge(&hdev->rx_q);
2757 skb_queue_purge(&hdev->cmd_q);
2759 /* Avoid potential lockdep warnings from the *_flush() calls by
2760 * ensuring the workqueue is empty up front.
2762 drain_workqueue(hdev->workqueue);
2765 hci_inquiry_cache_flush(hdev);
2766 hci_conn_hash_flush(hdev);
2767 hci_dev_unlock(hdev);
2772 atomic_set(&hdev->cmd_cnt, 1);
2773 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2775 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2778 hci_req_unlock(hdev);
2783 int hci_dev_reset_stat(__u16 dev)
2785 struct hci_dev *hdev;
2788 hdev = hci_dev_get(dev);
2792 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2797 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2802 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2809 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2811 bool conn_changed, discov_changed;
2813 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2815 if ((scan & SCAN_PAGE))
2816 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2819 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2822 if ((scan & SCAN_INQUIRY)) {
2823 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2826 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2827 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2831 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2834 if (conn_changed || discov_changed) {
2835 /* In case this was disabled through mgmt */
2836 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2838 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2839 mgmt_update_adv_data(hdev);
2841 mgmt_new_settings(hdev);
2845 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2847 struct hci_dev *hdev;
2848 struct hci_dev_req dr;
2851 if (copy_from_user(&dr, arg, sizeof(dr)))
2854 hdev = hci_dev_get(dr.dev_id);
2858 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2863 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2868 if (hdev->dev_type != HCI_BREDR) {
2873 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2880 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2885 if (!lmp_encrypt_capable(hdev)) {
2890 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2891 /* Auth must be enabled first */
2892 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2898 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2903 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2906 /* Ensure that the connectable and discoverable states
2907 * get correctly modified as this was a non-mgmt change.
2910 hci_update_scan_state(hdev, dr.dev_opt);
2914 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2918 case HCISETLINKMODE:
2919 hdev->link_mode = ((__u16) dr.dev_opt) &
2920 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2924 hdev->pkt_type = (__u16) dr.dev_opt;
2928 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2929 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2933 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2934 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2947 int hci_get_dev_list(void __user *arg)
2949 struct hci_dev *hdev;
2950 struct hci_dev_list_req *dl;
2951 struct hci_dev_req *dr;
2952 int n = 0, size, err;
2955 if (get_user(dev_num, (__u16 __user *) arg))
2958 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2961 size = sizeof(*dl) + dev_num * sizeof(*dr);
2963 dl = kzalloc(size, GFP_KERNEL);
2969 read_lock(&hci_dev_list_lock);
2970 list_for_each_entry(hdev, &hci_dev_list, list) {
2971 unsigned long flags = hdev->flags;
2973 /* When the auto-off is configured it means the transport
2974 * is running, but in that case still indicate that the
2975 * device is actually down.
2977 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2978 flags &= ~BIT(HCI_UP);
2980 (dr + n)->dev_id = hdev->id;
2981 (dr + n)->dev_opt = flags;
2986 read_unlock(&hci_dev_list_lock);
2989 size = sizeof(*dl) + n * sizeof(*dr);
2991 err = copy_to_user(arg, dl, size);
2994 return err ? -EFAULT : 0;
2997 int hci_get_dev_info(void __user *arg)
2999 struct hci_dev *hdev;
3000 struct hci_dev_info di;
3001 unsigned long flags;
3004 if (copy_from_user(&di, arg, sizeof(di)))
3007 hdev = hci_dev_get(di.dev_id);
3011 /* When the auto-off is configured it means the transport
3012 * is running, but in that case still indicate that the
3013 * device is actually down.
3015 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3016 flags = hdev->flags & ~BIT(HCI_UP);
3018 flags = hdev->flags;
3020 strcpy(di.name, hdev->name);
3021 di.bdaddr = hdev->bdaddr;
3022 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
3024 di.pkt_type = hdev->pkt_type;
3025 if (lmp_bredr_capable(hdev)) {
3026 di.acl_mtu = hdev->acl_mtu;
3027 di.acl_pkts = hdev->acl_pkts;
3028 di.sco_mtu = hdev->sco_mtu;
3029 di.sco_pkts = hdev->sco_pkts;
3031 di.acl_mtu = hdev->le_mtu;
3032 di.acl_pkts = hdev->le_pkts;
3036 di.link_policy = hdev->link_policy;
3037 di.link_mode = hdev->link_mode;
3039 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
3040 memcpy(&di.features, &hdev->features, sizeof(di.features));
3042 if (copy_to_user(arg, &di, sizeof(di)))
3050 /* ---- Interface to HCI drivers ---- */
3052 static int hci_rfkill_set_block(void *data, bool blocked)
3054 struct hci_dev *hdev = data;
3056 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3058 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3062 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3063 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3064 !test_bit(HCI_CONFIG, &hdev->dev_flags))
3065 hci_dev_do_close(hdev);
3067 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
3073 static const struct rfkill_ops hci_rfkill_ops = {
3074 .set_block = hci_rfkill_set_block,
3077 static void hci_power_on(struct work_struct *work)
3079 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3082 BT_DBG("%s", hdev->name);
3084 err = hci_dev_do_open(hdev);
3086 mgmt_set_powered_failed(hdev, err);
3090 /* During the HCI setup phase, a few error conditions are
3091 * ignored and they need to be checked now. If they are still
3092 * valid, it is important to turn the device back off.
3094 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3095 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3096 (hdev->dev_type == HCI_BREDR &&
3097 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3098 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3099 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3100 hci_dev_do_close(hdev);
3101 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3102 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3103 HCI_AUTO_OFF_TIMEOUT);
3106 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3107 /* For unconfigured devices, set the HCI_RAW flag
3108 * so that userspace can easily identify them.
3110 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3111 set_bit(HCI_RAW, &hdev->flags);
3113 /* For fully configured devices, this will send
3114 * the Index Added event. For unconfigured devices,
3115 * it will send Unconfigued Index Added event.
3117 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3118 * and no event will be send.
3120 mgmt_index_added(hdev);
3121 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3122 /* When the controller is now configured, then it
3123 * is important to clear the HCI_RAW flag.
3125 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3126 clear_bit(HCI_RAW, &hdev->flags);
3128 /* Powering on the controller with HCI_CONFIG set only
3129 * happens with the transition from unconfigured to
3130 * configured. This will send the Index Added event.
3132 mgmt_index_added(hdev);
3136 static void hci_power_off(struct work_struct *work)
3138 struct hci_dev *hdev = container_of(work, struct hci_dev,
3141 BT_DBG("%s", hdev->name);
3143 hci_dev_do_close(hdev);
3146 static void hci_discov_off(struct work_struct *work)
3148 struct hci_dev *hdev;
3150 hdev = container_of(work, struct hci_dev, discov_off.work);
3152 BT_DBG("%s", hdev->name);
3154 mgmt_discoverable_timeout(hdev);
3157 void hci_uuids_clear(struct hci_dev *hdev)
3159 struct bt_uuid *uuid, *tmp;
3161 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3162 list_del(&uuid->list);
3167 void hci_link_keys_clear(struct hci_dev *hdev)
3169 struct link_key *key;
3171 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3172 list_del_rcu(&key->list);
3173 kfree_rcu(key, rcu);
3177 void hci_smp_ltks_clear(struct hci_dev *hdev)
3181 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3182 list_del_rcu(&k->list);
3187 void hci_smp_irks_clear(struct hci_dev *hdev)
3191 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3192 list_del_rcu(&k->list);
3197 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3202 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3203 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3213 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3214 u8 key_type, u8 old_key_type)
3217 if (key_type < 0x03)
3220 /* Debug keys are insecure so don't store them persistently */
3221 if (key_type == HCI_LK_DEBUG_COMBINATION)
3224 /* Changed combination key and there's no previous one */
3225 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3228 /* Security mode 3 case */
3232 /* BR/EDR key derived using SC from an LE link */
3233 if (conn->type == LE_LINK)
3236 /* Neither local nor remote side had no-bonding as requirement */
3237 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3240 /* Local side had dedicated bonding as requirement */
3241 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3244 /* Remote side had dedicated bonding as requirement */
3245 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3248 /* If none of the above criteria match, then don't store the key
3253 static u8 ltk_role(u8 type)
3255 if (type == SMP_LTK)
3256 return HCI_ROLE_MASTER;
3258 return HCI_ROLE_SLAVE;
3261 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3262 u8 addr_type, u8 role)
3267 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3268 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3271 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
3281 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3283 struct smp_irk *irk;
3286 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3287 if (!bacmp(&irk->rpa, rpa)) {
3293 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3294 if (smp_irk_matches(hdev, irk->val, rpa)) {
3295 bacpy(&irk->rpa, rpa);
3305 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3308 struct smp_irk *irk;
3310 /* Identity Address must be public or static random */
3311 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3315 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3316 if (addr_type == irk->addr_type &&
3317 bacmp(bdaddr, &irk->bdaddr) == 0) {
3327 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3328 bdaddr_t *bdaddr, u8 *val, u8 type,
3329 u8 pin_len, bool *persistent)
3331 struct link_key *key, *old_key;
3334 old_key = hci_find_link_key(hdev, bdaddr);
3336 old_key_type = old_key->type;
3339 old_key_type = conn ? conn->key_type : 0xff;
3340 key = kzalloc(sizeof(*key), GFP_KERNEL);
3343 list_add_rcu(&key->list, &hdev->link_keys);
3346 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3348 /* Some buggy controller combinations generate a changed
3349 * combination key for legacy pairing even when there's no
3351 if (type == HCI_LK_CHANGED_COMBINATION &&
3352 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3353 type = HCI_LK_COMBINATION;
3355 conn->key_type = type;
3358 bacpy(&key->bdaddr, bdaddr);
3359 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3360 key->pin_len = pin_len;
3362 if (type == HCI_LK_CHANGED_COMBINATION)
3363 key->type = old_key_type;
3368 *persistent = hci_persistent_key(hdev, conn, type,
3374 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3375 u8 addr_type, u8 type, u8 authenticated,
3376 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3378 struct smp_ltk *key, *old_key;
3379 u8 role = ltk_role(type);
3381 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
3385 key = kzalloc(sizeof(*key), GFP_KERNEL);
3388 list_add_rcu(&key->list, &hdev->long_term_keys);
3391 bacpy(&key->bdaddr, bdaddr);
3392 key->bdaddr_type = addr_type;
3393 memcpy(key->val, tk, sizeof(key->val));
3394 key->authenticated = authenticated;
3397 key->enc_size = enc_size;
3403 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3404 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3406 struct smp_irk *irk;
3408 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3410 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3414 bacpy(&irk->bdaddr, bdaddr);
3415 irk->addr_type = addr_type;
3417 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
3420 memcpy(irk->val, val, 16);
3421 bacpy(&irk->rpa, rpa);
3426 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3428 struct link_key *key;
3430 key = hci_find_link_key(hdev, bdaddr);
3434 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3436 list_del_rcu(&key->list);
3437 kfree_rcu(key, rcu);
3442 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3447 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3448 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3451 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3453 list_del_rcu(&k->list);
3458 return removed ? 0 : -ENOENT;
3461 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3465 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3466 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3469 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3471 list_del_rcu(&k->list);
3476 /* HCI command timer function */
3477 static void hci_cmd_timeout(struct work_struct *work)
3479 struct hci_dev *hdev = container_of(work, struct hci_dev,
3482 if (hdev->sent_cmd) {
3483 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3484 u16 opcode = __le16_to_cpu(sent->opcode);
3486 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3488 BT_ERR("%s command tx timeout", hdev->name);
3491 atomic_set(&hdev->cmd_cnt, 1);
3492 queue_work(hdev->workqueue, &hdev->cmd_work);
3495 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3496 bdaddr_t *bdaddr, u8 bdaddr_type)
3498 struct oob_data *data;
3500 list_for_each_entry(data, &hdev->remote_oob_data, list) {
3501 if (bacmp(bdaddr, &data->bdaddr) != 0)
3503 if (data->bdaddr_type != bdaddr_type)
3511 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3514 struct oob_data *data;
3516 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
3520 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
3522 list_del(&data->list);
3528 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3530 struct oob_data *data, *n;
3532 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3533 list_del(&data->list);
3538 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3539 u8 bdaddr_type, u8 *hash192, u8 *rand192,
3540 u8 *hash256, u8 *rand256)
3542 struct oob_data *data;
3544 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
3546 data = kmalloc(sizeof(*data), GFP_KERNEL);
3550 bacpy(&data->bdaddr, bdaddr);
3551 data->bdaddr_type = bdaddr_type;
3552 list_add(&data->list, &hdev->remote_oob_data);
3555 if (hash192 && rand192) {
3556 memcpy(data->hash192, hash192, sizeof(data->hash192));
3557 memcpy(data->rand192, rand192, sizeof(data->rand192));
3559 memset(data->hash192, 0, sizeof(data->hash192));
3560 memset(data->rand192, 0, sizeof(data->rand192));
3563 if (hash256 && rand256) {
3564 memcpy(data->hash256, hash256, sizeof(data->hash256));
3565 memcpy(data->rand256, rand256, sizeof(data->rand256));
3567 memset(data->hash256, 0, sizeof(data->hash256));
3568 memset(data->rand256, 0, sizeof(data->rand256));
3571 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3576 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3577 bdaddr_t *bdaddr, u8 type)
3579 struct bdaddr_list *b;
3581 list_for_each_entry(b, bdaddr_list, list) {
3582 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3589 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3591 struct list_head *p, *n;
3593 list_for_each_safe(p, n, bdaddr_list) {
3594 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3601 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3603 struct bdaddr_list *entry;
3605 if (!bacmp(bdaddr, BDADDR_ANY))
3608 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3611 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3615 bacpy(&entry->bdaddr, bdaddr);
3616 entry->bdaddr_type = type;
3618 list_add(&entry->list, list);
3623 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3625 struct bdaddr_list *entry;
3627 if (!bacmp(bdaddr, BDADDR_ANY)) {
3628 hci_bdaddr_list_clear(list);
3632 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3636 list_del(&entry->list);
3642 /* This function requires the caller holds hdev->lock */
3643 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3644 bdaddr_t *addr, u8 addr_type)
3646 struct hci_conn_params *params;
3648 /* The conn params list only contains identity addresses */
3649 if (!hci_is_identity_address(addr, addr_type))
3652 list_for_each_entry(params, &hdev->le_conn_params, list) {
3653 if (bacmp(¶ms->addr, addr) == 0 &&
3654 params->addr_type == addr_type) {
3662 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3664 struct hci_conn *conn;
3666 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3670 if (conn->dst_type != type)
3673 if (conn->state != BT_CONNECTED)
3679 /* This function requires the caller holds hdev->lock */
3680 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3681 bdaddr_t *addr, u8 addr_type)
3683 struct hci_conn_params *param;
3685 /* The list only contains identity addresses */
3686 if (!hci_is_identity_address(addr, addr_type))
3689 list_for_each_entry(param, list, action) {
3690 if (bacmp(¶m->addr, addr) == 0 &&
3691 param->addr_type == addr_type)
3698 /* This function requires the caller holds hdev->lock */
3699 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3700 bdaddr_t *addr, u8 addr_type)
3702 struct hci_conn_params *params;
3704 if (!hci_is_identity_address(addr, addr_type))
3707 params = hci_conn_params_lookup(hdev, addr, addr_type);
3711 params = kzalloc(sizeof(*params), GFP_KERNEL);
3713 BT_ERR("Out of memory");
3717 bacpy(¶ms->addr, addr);
3718 params->addr_type = addr_type;
3720 list_add(¶ms->list, &hdev->le_conn_params);
3721 INIT_LIST_HEAD(¶ms->action);
3723 params->conn_min_interval = hdev->le_conn_min_interval;
3724 params->conn_max_interval = hdev->le_conn_max_interval;
3725 params->conn_latency = hdev->le_conn_latency;
3726 params->supervision_timeout = hdev->le_supv_timeout;
3727 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3729 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3734 /* This function requires the caller holds hdev->lock */
3735 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3738 struct hci_conn_params *params;
3740 params = hci_conn_params_add(hdev, addr, addr_type);
3744 if (params->auto_connect == auto_connect)
3747 list_del_init(¶ms->action);
3749 switch (auto_connect) {
3750 case HCI_AUTO_CONN_DISABLED:
3751 case HCI_AUTO_CONN_LINK_LOSS:
3752 hci_update_background_scan(hdev);
3754 case HCI_AUTO_CONN_REPORT:
3755 list_add(¶ms->action, &hdev->pend_le_reports);
3756 hci_update_background_scan(hdev);
3758 case HCI_AUTO_CONN_DIRECT:
3759 case HCI_AUTO_CONN_ALWAYS:
3760 if (!is_connected(hdev, addr, addr_type)) {
3761 list_add(¶ms->action, &hdev->pend_le_conns);
3762 hci_update_background_scan(hdev);
3767 params->auto_connect = auto_connect;
3769 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3775 static void hci_conn_params_free(struct hci_conn_params *params)
3778 hci_conn_drop(params->conn);
3779 hci_conn_put(params->conn);
3782 list_del(¶ms->action);
3783 list_del(¶ms->list);
3787 /* This function requires the caller holds hdev->lock */
3788 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3790 struct hci_conn_params *params;
3792 params = hci_conn_params_lookup(hdev, addr, addr_type);
3796 hci_conn_params_free(params);
3798 hci_update_background_scan(hdev);
3800 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3803 /* This function requires the caller holds hdev->lock */
3804 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3806 struct hci_conn_params *params, *tmp;
3808 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3809 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3811 list_del(¶ms->list);
3815 BT_DBG("All LE disabled connection parameters were removed");
3818 /* This function requires the caller holds hdev->lock */
3819 void hci_conn_params_clear_all(struct hci_dev *hdev)
3821 struct hci_conn_params *params, *tmp;
3823 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3824 hci_conn_params_free(params);
3826 hci_update_background_scan(hdev);
3828 BT_DBG("All LE connection parameters were removed");
3831 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3834 BT_ERR("Failed to start inquiry: status %d", status);
3837 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3838 hci_dev_unlock(hdev);
3843 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3845 /* General inquiry access code (GIAC) */
3846 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3847 struct hci_request req;
3848 struct hci_cp_inquiry cp;
3852 BT_ERR("Failed to disable LE scanning: status %d", status);
3856 switch (hdev->discovery.type) {
3857 case DISCOV_TYPE_LE:
3859 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3860 hci_dev_unlock(hdev);
3863 case DISCOV_TYPE_INTERLEAVED:
3864 hci_req_init(&req, hdev);
3866 memset(&cp, 0, sizeof(cp));
3867 memcpy(&cp.lap, lap, sizeof(cp.lap));
3868 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3869 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3873 hci_inquiry_cache_flush(hdev);
3875 err = hci_req_run(&req, inquiry_complete);
3877 BT_ERR("Inquiry request failed: err %d", err);
3878 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3881 hci_dev_unlock(hdev);
3886 static void le_scan_disable_work(struct work_struct *work)
3888 struct hci_dev *hdev = container_of(work, struct hci_dev,
3889 le_scan_disable.work);
3890 struct hci_request req;
3893 BT_DBG("%s", hdev->name);
3895 hci_req_init(&req, hdev);
3897 hci_req_add_le_scan_disable(&req);
3899 err = hci_req_run(&req, le_scan_disable_work_complete);
3901 BT_ERR("Disable LE scanning request failed: err %d", err);
3904 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3906 struct hci_dev *hdev = req->hdev;
3908 /* If we're advertising or initiating an LE connection we can't
3909 * go ahead and change the random address at this time. This is
3910 * because the eventual initiator address used for the
3911 * subsequently created connection will be undefined (some
3912 * controllers use the new address and others the one we had
3913 * when the operation started).
3915 * In this kind of scenario skip the update and let the random
3916 * address be updated at the next cycle.
3918 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3919 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3920 BT_DBG("Deferring random address update");
3921 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
3925 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3928 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3931 struct hci_dev *hdev = req->hdev;
3934 /* If privacy is enabled use a resolvable private address. If
3935 * current RPA has expired or there is something else than
3936 * the current RPA in use, then generate a new one.
3938 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3941 *own_addr_type = ADDR_LE_DEV_RANDOM;
3943 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3944 !bacmp(&hdev->random_addr, &hdev->rpa))
3947 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
3949 BT_ERR("%s failed to generate new RPA", hdev->name);
3953 set_random_addr(req, &hdev->rpa);
3955 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3956 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3961 /* In case of required privacy without resolvable private address,
3962 * use an unresolvable private address. This is useful for active
3963 * scanning and non-connectable advertising.
3965 if (require_privacy) {
3968 get_random_bytes(&urpa, 6);
3969 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3971 *own_addr_type = ADDR_LE_DEV_RANDOM;
3972 set_random_addr(req, &urpa);
3976 /* If forcing static address is in use or there is no public
3977 * address use the static address as random address (but skip
3978 * the HCI command if the current random address is already the
3981 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3982 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3983 *own_addr_type = ADDR_LE_DEV_RANDOM;
3984 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3985 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3986 &hdev->static_addr);
3990 /* Neither privacy nor static address is being used so use a
3993 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3998 /* Copy the Identity Address of the controller.
4000 * If the controller has a public BD_ADDR, then by default use that one.
4001 * If this is a LE only controller without a public address, default to
4002 * the static random address.
4004 * For debugging purposes it is possible to force controllers with a
4005 * public address to use the static random address instead.
4007 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
4010 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
4011 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
4012 bacpy(bdaddr, &hdev->static_addr);
4013 *bdaddr_type = ADDR_LE_DEV_RANDOM;
4015 bacpy(bdaddr, &hdev->bdaddr);
4016 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
4020 /* Alloc HCI device */
4021 struct hci_dev *hci_alloc_dev(void)
4023 struct hci_dev *hdev;
4025 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
4029 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
4030 hdev->esco_type = (ESCO_HV1);
4031 hdev->link_mode = (HCI_LM_ACCEPT);
4032 hdev->num_iac = 0x01; /* One IAC support is mandatory */
4033 hdev->io_capability = 0x03; /* No Input No Output */
4034 hdev->manufacturer = 0xffff; /* Default to internal use */
4035 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
4036 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
4038 hdev->sniff_max_interval = 800;
4039 hdev->sniff_min_interval = 80;
4041 hdev->le_adv_channel_map = 0x07;
4042 hdev->le_adv_min_interval = 0x0800;
4043 hdev->le_adv_max_interval = 0x0800;
4044 hdev->le_scan_interval = 0x0060;
4045 hdev->le_scan_window = 0x0030;
4046 hdev->le_conn_min_interval = 0x0028;
4047 hdev->le_conn_max_interval = 0x0038;
4048 hdev->le_conn_latency = 0x0000;
4049 hdev->le_supv_timeout = 0x002a;
4051 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
4052 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
4053 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4054 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
4056 mutex_init(&hdev->lock);
4057 mutex_init(&hdev->req_lock);
4059 INIT_LIST_HEAD(&hdev->mgmt_pending);
4060 INIT_LIST_HEAD(&hdev->blacklist);
4061 INIT_LIST_HEAD(&hdev->whitelist);
4062 INIT_LIST_HEAD(&hdev->uuids);
4063 INIT_LIST_HEAD(&hdev->link_keys);
4064 INIT_LIST_HEAD(&hdev->long_term_keys);
4065 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
4066 INIT_LIST_HEAD(&hdev->remote_oob_data);
4067 INIT_LIST_HEAD(&hdev->le_white_list);
4068 INIT_LIST_HEAD(&hdev->le_conn_params);
4069 INIT_LIST_HEAD(&hdev->pend_le_conns);
4070 INIT_LIST_HEAD(&hdev->pend_le_reports);
4071 INIT_LIST_HEAD(&hdev->conn_hash.list);
4073 INIT_WORK(&hdev->rx_work, hci_rx_work);
4074 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4075 INIT_WORK(&hdev->tx_work, hci_tx_work);
4076 INIT_WORK(&hdev->power_on, hci_power_on);
4078 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4079 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4080 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4082 skb_queue_head_init(&hdev->rx_q);
4083 skb_queue_head_init(&hdev->cmd_q);
4084 skb_queue_head_init(&hdev->raw_q);
4086 init_waitqueue_head(&hdev->req_wait_q);
4088 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
4090 hci_init_sysfs(hdev);
4091 discovery_init(hdev);
4095 EXPORT_SYMBOL(hci_alloc_dev);
4097 /* Free HCI device */
4098 void hci_free_dev(struct hci_dev *hdev)
4100 /* will free via device release */
4101 put_device(&hdev->dev);
4103 EXPORT_SYMBOL(hci_free_dev);
4105 /* Register HCI device */
4106 int hci_register_dev(struct hci_dev *hdev)
4110 if (!hdev->open || !hdev->close || !hdev->send)
4113 /* Do not allow HCI_AMP devices to register at index 0,
4114 * so the index can be used as the AMP controller ID.
4116 switch (hdev->dev_type) {
4118 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4121 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4130 sprintf(hdev->name, "hci%d", id);
4133 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4135 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4136 WQ_MEM_RECLAIM, 1, hdev->name);
4137 if (!hdev->workqueue) {
4142 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4143 WQ_MEM_RECLAIM, 1, hdev->name);
4144 if (!hdev->req_workqueue) {
4145 destroy_workqueue(hdev->workqueue);
4150 if (!IS_ERR_OR_NULL(bt_debugfs))
4151 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4153 dev_set_name(&hdev->dev, "%s", hdev->name);
4155 error = device_add(&hdev->dev);
4159 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4160 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4163 if (rfkill_register(hdev->rfkill) < 0) {
4164 rfkill_destroy(hdev->rfkill);
4165 hdev->rfkill = NULL;
4169 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4170 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4172 set_bit(HCI_SETUP, &hdev->dev_flags);
4173 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4175 if (hdev->dev_type == HCI_BREDR) {
4176 /* Assume BR/EDR support until proven otherwise (such as
4177 * through reading supported features during init.
4179 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4182 write_lock(&hci_dev_list_lock);
4183 list_add(&hdev->list, &hci_dev_list);
4184 write_unlock(&hci_dev_list_lock);
4186 /* Devices that are marked for raw-only usage are unconfigured
4187 * and should not be included in normal operation.
4189 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4190 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4192 hci_notify(hdev, HCI_DEV_REG);
4195 queue_work(hdev->req_workqueue, &hdev->power_on);
4200 destroy_workqueue(hdev->workqueue);
4201 destroy_workqueue(hdev->req_workqueue);
4203 ida_simple_remove(&hci_index_ida, hdev->id);
4207 EXPORT_SYMBOL(hci_register_dev);
4209 /* Unregister HCI device */
4210 void hci_unregister_dev(struct hci_dev *hdev)
4214 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4216 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4220 write_lock(&hci_dev_list_lock);
4221 list_del(&hdev->list);
4222 write_unlock(&hci_dev_list_lock);
4224 hci_dev_do_close(hdev);
4226 for (i = 0; i < NUM_REASSEMBLY; i++)
4227 kfree_skb(hdev->reassembly[i]);
4229 cancel_work_sync(&hdev->power_on);
4231 if (!test_bit(HCI_INIT, &hdev->flags) &&
4232 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4233 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4235 mgmt_index_removed(hdev);
4236 hci_dev_unlock(hdev);
4239 /* mgmt_index_removed should take care of emptying the
4241 BUG_ON(!list_empty(&hdev->mgmt_pending));
4243 hci_notify(hdev, HCI_DEV_UNREG);
4246 rfkill_unregister(hdev->rfkill);
4247 rfkill_destroy(hdev->rfkill);
4250 smp_unregister(hdev);
4252 device_del(&hdev->dev);
4254 debugfs_remove_recursive(hdev->debugfs);
4256 destroy_workqueue(hdev->workqueue);
4257 destroy_workqueue(hdev->req_workqueue);
4260 hci_bdaddr_list_clear(&hdev->blacklist);
4261 hci_bdaddr_list_clear(&hdev->whitelist);
4262 hci_uuids_clear(hdev);
4263 hci_link_keys_clear(hdev);
4264 hci_smp_ltks_clear(hdev);
4265 hci_smp_irks_clear(hdev);
4266 hci_remote_oob_data_clear(hdev);
4267 hci_bdaddr_list_clear(&hdev->le_white_list);
4268 hci_conn_params_clear_all(hdev);
4269 hci_discovery_filter_clear(hdev);
4270 hci_dev_unlock(hdev);
4274 ida_simple_remove(&hci_index_ida, id);
4276 EXPORT_SYMBOL(hci_unregister_dev);
4278 /* Suspend HCI device */
4279 int hci_suspend_dev(struct hci_dev *hdev)
4281 hci_notify(hdev, HCI_DEV_SUSPEND);
4284 EXPORT_SYMBOL(hci_suspend_dev);
4286 /* Resume HCI device */
4287 int hci_resume_dev(struct hci_dev *hdev)
4289 hci_notify(hdev, HCI_DEV_RESUME);
4292 EXPORT_SYMBOL(hci_resume_dev);
4294 /* Reset HCI device */
4295 int hci_reset_dev(struct hci_dev *hdev)
4297 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4298 struct sk_buff *skb;
4300 skb = bt_skb_alloc(3, GFP_ATOMIC);
4304 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4305 memcpy(skb_put(skb, 3), hw_err, 3);
4307 /* Send Hardware Error to upper stack */
4308 return hci_recv_frame(hdev, skb);
4310 EXPORT_SYMBOL(hci_reset_dev);
4312 /* Receive frame from HCI drivers */
4313 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4315 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4316 && !test_bit(HCI_INIT, &hdev->flags))) {
4322 bt_cb(skb)->incoming = 1;
4325 __net_timestamp(skb);
4327 skb_queue_tail(&hdev->rx_q, skb);
4328 queue_work(hdev->workqueue, &hdev->rx_work);
4332 EXPORT_SYMBOL(hci_recv_frame);
4334 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4335 int count, __u8 index)
4340 struct sk_buff *skb;
4341 struct bt_skb_cb *scb;
4343 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4344 index >= NUM_REASSEMBLY)
4347 skb = hdev->reassembly[index];
4351 case HCI_ACLDATA_PKT:
4352 len = HCI_MAX_FRAME_SIZE;
4353 hlen = HCI_ACL_HDR_SIZE;
4356 len = HCI_MAX_EVENT_SIZE;
4357 hlen = HCI_EVENT_HDR_SIZE;
4359 case HCI_SCODATA_PKT:
4360 len = HCI_MAX_SCO_SIZE;
4361 hlen = HCI_SCO_HDR_SIZE;
4365 skb = bt_skb_alloc(len, GFP_ATOMIC);
4369 scb = (void *) skb->cb;
4371 scb->pkt_type = type;
4373 hdev->reassembly[index] = skb;
4377 scb = (void *) skb->cb;
4378 len = min_t(uint, scb->expect, count);
4380 memcpy(skb_put(skb, len), data, len);
4389 if (skb->len == HCI_EVENT_HDR_SIZE) {
4390 struct hci_event_hdr *h = hci_event_hdr(skb);
4391 scb->expect = h->plen;
4393 if (skb_tailroom(skb) < scb->expect) {
4395 hdev->reassembly[index] = NULL;
4401 case HCI_ACLDATA_PKT:
4402 if (skb->len == HCI_ACL_HDR_SIZE) {
4403 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4404 scb->expect = __le16_to_cpu(h->dlen);
4406 if (skb_tailroom(skb) < scb->expect) {
4408 hdev->reassembly[index] = NULL;
4414 case HCI_SCODATA_PKT:
4415 if (skb->len == HCI_SCO_HDR_SIZE) {
4416 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4417 scb->expect = h->dlen;
4419 if (skb_tailroom(skb) < scb->expect) {
4421 hdev->reassembly[index] = NULL;
4428 if (scb->expect == 0) {
4429 /* Complete frame */
4431 bt_cb(skb)->pkt_type = type;
4432 hci_recv_frame(hdev, skb);
4434 hdev->reassembly[index] = NULL;
4442 #define STREAM_REASSEMBLY 0
4444 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4450 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4453 struct { char type; } *pkt;
4455 /* Start of the frame */
4462 type = bt_cb(skb)->pkt_type;
4464 rem = hci_reassembly(hdev, type, data, count,
4469 data += (count - rem);
4475 EXPORT_SYMBOL(hci_recv_stream_fragment);
4477 /* ---- Interface to upper protocols ---- */
4479 int hci_register_cb(struct hci_cb *cb)
4481 BT_DBG("%p name %s", cb, cb->name);
4483 write_lock(&hci_cb_list_lock);
4484 list_add(&cb->list, &hci_cb_list);
4485 write_unlock(&hci_cb_list_lock);
4489 EXPORT_SYMBOL(hci_register_cb);
4491 int hci_unregister_cb(struct hci_cb *cb)
4493 BT_DBG("%p name %s", cb, cb->name);
4495 write_lock(&hci_cb_list_lock);
4496 list_del(&cb->list);
4497 write_unlock(&hci_cb_list_lock);
4501 EXPORT_SYMBOL(hci_unregister_cb);
4503 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4507 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4510 __net_timestamp(skb);
4512 /* Send copy to monitor */
4513 hci_send_to_monitor(hdev, skb);
4515 if (atomic_read(&hdev->promisc)) {
4516 /* Send copy to the sockets */
4517 hci_send_to_sock(hdev, skb);
4520 /* Get rid of skb owner, prior to sending to the driver. */
4523 err = hdev->send(hdev, skb);
4525 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4530 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4532 skb_queue_head_init(&req->cmd_q);
4537 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4539 struct hci_dev *hdev = req->hdev;
4540 struct sk_buff *skb;
4541 unsigned long flags;
4543 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4545 /* If an error occurred during request building, remove all HCI
4546 * commands queued on the HCI request queue.
4549 skb_queue_purge(&req->cmd_q);
4553 /* Do not allow empty requests */
4554 if (skb_queue_empty(&req->cmd_q))
4557 skb = skb_peek_tail(&req->cmd_q);
4558 bt_cb(skb)->req.complete = complete;
4560 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4561 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4562 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4564 queue_work(hdev->workqueue, &hdev->cmd_work);
4569 bool hci_req_pending(struct hci_dev *hdev)
4571 return (hdev->req_status == HCI_REQ_PEND);
4574 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4575 u32 plen, const void *param)
4577 int len = HCI_COMMAND_HDR_SIZE + plen;
4578 struct hci_command_hdr *hdr;
4579 struct sk_buff *skb;
4581 skb = bt_skb_alloc(len, GFP_ATOMIC);
4585 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4586 hdr->opcode = cpu_to_le16(opcode);
4590 memcpy(skb_put(skb, plen), param, plen);
4592 BT_DBG("skb len %d", skb->len);
4594 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4595 bt_cb(skb)->opcode = opcode;
4600 /* Send HCI command */
4601 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4604 struct sk_buff *skb;
4606 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4608 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4610 BT_ERR("%s no memory for command", hdev->name);
4614 /* Stand-alone HCI commands must be flagged as
4615 * single-command requests.
4617 bt_cb(skb)->req.start = true;
4619 skb_queue_tail(&hdev->cmd_q, skb);
4620 queue_work(hdev->workqueue, &hdev->cmd_work);
4625 /* Queue a command to an asynchronous HCI request */
4626 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4627 const void *param, u8 event)
4629 struct hci_dev *hdev = req->hdev;
4630 struct sk_buff *skb;
4632 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4634 /* If an error occurred during request building, there is no point in
4635 * queueing the HCI command. We can simply return.
4640 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4642 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4643 hdev->name, opcode);
4648 if (skb_queue_empty(&req->cmd_q))
4649 bt_cb(skb)->req.start = true;
4651 bt_cb(skb)->req.event = event;
4653 skb_queue_tail(&req->cmd_q, skb);
4656 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4659 hci_req_add_ev(req, opcode, plen, param, 0);
4662 /* Get data from the previously sent command */
4663 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4665 struct hci_command_hdr *hdr;
4667 if (!hdev->sent_cmd)
4670 hdr = (void *) hdev->sent_cmd->data;
4672 if (hdr->opcode != cpu_to_le16(opcode))
4675 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4677 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4681 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4683 struct hci_acl_hdr *hdr;
4686 skb_push(skb, HCI_ACL_HDR_SIZE);
4687 skb_reset_transport_header(skb);
4688 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4689 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4690 hdr->dlen = cpu_to_le16(len);
4693 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4694 struct sk_buff *skb, __u16 flags)
4696 struct hci_conn *conn = chan->conn;
4697 struct hci_dev *hdev = conn->hdev;
4698 struct sk_buff *list;
4700 skb->len = skb_headlen(skb);
4703 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4705 switch (hdev->dev_type) {
4707 hci_add_acl_hdr(skb, conn->handle, flags);
4710 hci_add_acl_hdr(skb, chan->handle, flags);
4713 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4717 list = skb_shinfo(skb)->frag_list;
4719 /* Non fragmented */
4720 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4722 skb_queue_tail(queue, skb);
4725 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4727 skb_shinfo(skb)->frag_list = NULL;
4729 /* Queue all fragments atomically. We need to use spin_lock_bh
4730 * here because of 6LoWPAN links, as there this function is
4731 * called from softirq and using normal spin lock could cause
4734 spin_lock_bh(&queue->lock);
4736 __skb_queue_tail(queue, skb);
4738 flags &= ~ACL_START;
4741 skb = list; list = list->next;
4743 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4744 hci_add_acl_hdr(skb, conn->handle, flags);
4746 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4748 __skb_queue_tail(queue, skb);
4751 spin_unlock_bh(&queue->lock);
4755 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4757 struct hci_dev *hdev = chan->conn->hdev;
4759 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4761 hci_queue_acl(chan, &chan->data_q, skb, flags);
4763 queue_work(hdev->workqueue, &hdev->tx_work);
4767 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4769 struct hci_dev *hdev = conn->hdev;
4770 struct hci_sco_hdr hdr;
4772 BT_DBG("%s len %d", hdev->name, skb->len);
4774 hdr.handle = cpu_to_le16(conn->handle);
4775 hdr.dlen = skb->len;
4777 skb_push(skb, HCI_SCO_HDR_SIZE);
4778 skb_reset_transport_header(skb);
4779 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4781 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4783 skb_queue_tail(&conn->data_q, skb);
4784 queue_work(hdev->workqueue, &hdev->tx_work);
4787 /* ---- HCI TX task (outgoing data) ---- */
4789 /* HCI Connection scheduler */
4790 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4793 struct hci_conn_hash *h = &hdev->conn_hash;
4794 struct hci_conn *conn = NULL, *c;
4795 unsigned int num = 0, min = ~0;
4797 /* We don't have to lock device here. Connections are always
4798 * added and removed with TX task disabled. */
4802 list_for_each_entry_rcu(c, &h->list, list) {
4803 if (c->type != type || skb_queue_empty(&c->data_q))
4806 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4811 if (c->sent < min) {
4816 if (hci_conn_num(hdev, type) == num)
4825 switch (conn->type) {
4827 cnt = hdev->acl_cnt;
4831 cnt = hdev->sco_cnt;
4834 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4838 BT_ERR("Unknown link type");
4846 BT_DBG("conn %p quote %d", conn, *quote);
4850 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4852 struct hci_conn_hash *h = &hdev->conn_hash;
4855 BT_ERR("%s link tx timeout", hdev->name);
4859 /* Kill stalled connections */
4860 list_for_each_entry_rcu(c, &h->list, list) {
4861 if (c->type == type && c->sent) {
4862 BT_ERR("%s killing stalled connection %pMR",
4863 hdev->name, &c->dst);
4864 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4871 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4874 struct hci_conn_hash *h = &hdev->conn_hash;
4875 struct hci_chan *chan = NULL;
4876 unsigned int num = 0, min = ~0, cur_prio = 0;
4877 struct hci_conn *conn;
4878 int cnt, q, conn_num = 0;
4880 BT_DBG("%s", hdev->name);
4884 list_for_each_entry_rcu(conn, &h->list, list) {
4885 struct hci_chan *tmp;
4887 if (conn->type != type)
4890 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4895 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4896 struct sk_buff *skb;
4898 if (skb_queue_empty(&tmp->data_q))
4901 skb = skb_peek(&tmp->data_q);
4902 if (skb->priority < cur_prio)
4905 if (skb->priority > cur_prio) {
4908 cur_prio = skb->priority;
4913 if (conn->sent < min) {
4919 if (hci_conn_num(hdev, type) == conn_num)
4928 switch (chan->conn->type) {
4930 cnt = hdev->acl_cnt;
4933 cnt = hdev->block_cnt;
4937 cnt = hdev->sco_cnt;
4940 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4944 BT_ERR("Unknown link type");
4949 BT_DBG("chan %p quote %d", chan, *quote);
4953 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4955 struct hci_conn_hash *h = &hdev->conn_hash;
4956 struct hci_conn *conn;
4959 BT_DBG("%s", hdev->name);
4963 list_for_each_entry_rcu(conn, &h->list, list) {
4964 struct hci_chan *chan;
4966 if (conn->type != type)
4969 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4974 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4975 struct sk_buff *skb;
4982 if (skb_queue_empty(&chan->data_q))
4985 skb = skb_peek(&chan->data_q);
4986 if (skb->priority >= HCI_PRIO_MAX - 1)
4989 skb->priority = HCI_PRIO_MAX - 1;
4991 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4995 if (hci_conn_num(hdev, type) == num)
5003 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
5005 /* Calculate count of blocks used by this packet */
5006 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
5009 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
5011 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5012 /* ACL tx timeout must be longer than maximum
5013 * link supervision timeout (40.9 seconds) */
5014 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5015 HCI_ACL_TX_TIMEOUT))
5016 hci_link_tx_to(hdev, ACL_LINK);
5020 static void hci_sched_acl_pkt(struct hci_dev *hdev)
5022 unsigned int cnt = hdev->acl_cnt;
5023 struct hci_chan *chan;
5024 struct sk_buff *skb;
5027 __check_timeout(hdev, cnt);
5029 while (hdev->acl_cnt &&
5030 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
5031 u32 priority = (skb_peek(&chan->data_q))->priority;
5032 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5033 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5034 skb->len, skb->priority);
5036 /* Stop if priority has changed */
5037 if (skb->priority < priority)
5040 skb = skb_dequeue(&chan->data_q);
5042 hci_conn_enter_active_mode(chan->conn,
5043 bt_cb(skb)->force_active);
5045 hci_send_frame(hdev, skb);
5046 hdev->acl_last_tx = jiffies;
5054 if (cnt != hdev->acl_cnt)
5055 hci_prio_recalculate(hdev, ACL_LINK);
5058 static void hci_sched_acl_blk(struct hci_dev *hdev)
5060 unsigned int cnt = hdev->block_cnt;
5061 struct hci_chan *chan;
5062 struct sk_buff *skb;
5066 __check_timeout(hdev, cnt);
5068 BT_DBG("%s", hdev->name);
5070 if (hdev->dev_type == HCI_AMP)
5075 while (hdev->block_cnt > 0 &&
5076 (chan = hci_chan_sent(hdev, type, "e))) {
5077 u32 priority = (skb_peek(&chan->data_q))->priority;
5078 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5081 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5082 skb->len, skb->priority);
5084 /* Stop if priority has changed */
5085 if (skb->priority < priority)
5088 skb = skb_dequeue(&chan->data_q);
5090 blocks = __get_blocks(hdev, skb);
5091 if (blocks > hdev->block_cnt)
5094 hci_conn_enter_active_mode(chan->conn,
5095 bt_cb(skb)->force_active);
5097 hci_send_frame(hdev, skb);
5098 hdev->acl_last_tx = jiffies;
5100 hdev->block_cnt -= blocks;
5103 chan->sent += blocks;
5104 chan->conn->sent += blocks;
5108 if (cnt != hdev->block_cnt)
5109 hci_prio_recalculate(hdev, type);
5112 static void hci_sched_acl(struct hci_dev *hdev)
5114 BT_DBG("%s", hdev->name);
5116 /* No ACL link over BR/EDR controller */
5117 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5120 /* No AMP link over AMP controller */
5121 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5124 switch (hdev->flow_ctl_mode) {
5125 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5126 hci_sched_acl_pkt(hdev);
5129 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5130 hci_sched_acl_blk(hdev);
5136 static void hci_sched_sco(struct hci_dev *hdev)
5138 struct hci_conn *conn;
5139 struct sk_buff *skb;
5142 BT_DBG("%s", hdev->name);
5144 if (!hci_conn_num(hdev, SCO_LINK))
5147 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
5148 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5149 BT_DBG("skb %p len %d", skb, skb->len);
5150 hci_send_frame(hdev, skb);
5153 if (conn->sent == ~0)
5159 static void hci_sched_esco(struct hci_dev *hdev)
5161 struct hci_conn *conn;
5162 struct sk_buff *skb;
5165 BT_DBG("%s", hdev->name);
5167 if (!hci_conn_num(hdev, ESCO_LINK))
5170 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5172 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5173 BT_DBG("skb %p len %d", skb, skb->len);
5174 hci_send_frame(hdev, skb);
5177 if (conn->sent == ~0)
5183 static void hci_sched_le(struct hci_dev *hdev)
5185 struct hci_chan *chan;
5186 struct sk_buff *skb;
5187 int quote, cnt, tmp;
5189 BT_DBG("%s", hdev->name);
5191 if (!hci_conn_num(hdev, LE_LINK))
5194 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5195 /* LE tx timeout must be longer than maximum
5196 * link supervision timeout (40.9 seconds) */
5197 if (!hdev->le_cnt && hdev->le_pkts &&
5198 time_after(jiffies, hdev->le_last_tx + HZ * 45))
5199 hci_link_tx_to(hdev, LE_LINK);
5202 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5204 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
5205 u32 priority = (skb_peek(&chan->data_q))->priority;
5206 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5207 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5208 skb->len, skb->priority);
5210 /* Stop if priority has changed */
5211 if (skb->priority < priority)
5214 skb = skb_dequeue(&chan->data_q);
5216 hci_send_frame(hdev, skb);
5217 hdev->le_last_tx = jiffies;
5228 hdev->acl_cnt = cnt;
5231 hci_prio_recalculate(hdev, LE_LINK);
5234 static void hci_tx_work(struct work_struct *work)
5236 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5237 struct sk_buff *skb;
5239 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5240 hdev->sco_cnt, hdev->le_cnt);
5242 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5243 /* Schedule queues and send stuff to HCI driver */
5244 hci_sched_acl(hdev);
5245 hci_sched_sco(hdev);
5246 hci_sched_esco(hdev);
5250 /* Send next queued raw (unknown type) packet */
5251 while ((skb = skb_dequeue(&hdev->raw_q)))
5252 hci_send_frame(hdev, skb);
5255 /* ----- HCI RX task (incoming data processing) ----- */
5257 /* ACL data packet */
5258 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5260 struct hci_acl_hdr *hdr = (void *) skb->data;
5261 struct hci_conn *conn;
5262 __u16 handle, flags;
5264 skb_pull(skb, HCI_ACL_HDR_SIZE);
5266 handle = __le16_to_cpu(hdr->handle);
5267 flags = hci_flags(handle);
5268 handle = hci_handle(handle);
5270 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5273 hdev->stat.acl_rx++;
5276 conn = hci_conn_hash_lookup_handle(hdev, handle);
5277 hci_dev_unlock(hdev);
5280 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5282 /* Send to upper protocol */
5283 l2cap_recv_acldata(conn, skb, flags);
5286 BT_ERR("%s ACL packet for unknown connection handle %d",
5287 hdev->name, handle);
5293 /* SCO data packet */
5294 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5296 struct hci_sco_hdr *hdr = (void *) skb->data;
5297 struct hci_conn *conn;
5300 skb_pull(skb, HCI_SCO_HDR_SIZE);
5302 handle = __le16_to_cpu(hdr->handle);
5304 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5306 hdev->stat.sco_rx++;
5309 conn = hci_conn_hash_lookup_handle(hdev, handle);
5310 hci_dev_unlock(hdev);
5313 /* Send to upper protocol */
5314 sco_recv_scodata(conn, skb);
5317 BT_ERR("%s SCO packet for unknown connection handle %d",
5318 hdev->name, handle);
5324 static bool hci_req_is_complete(struct hci_dev *hdev)
5326 struct sk_buff *skb;
5328 skb = skb_peek(&hdev->cmd_q);
5332 return bt_cb(skb)->req.start;
5335 static void hci_resend_last(struct hci_dev *hdev)
5337 struct hci_command_hdr *sent;
5338 struct sk_buff *skb;
5341 if (!hdev->sent_cmd)
5344 sent = (void *) hdev->sent_cmd->data;
5345 opcode = __le16_to_cpu(sent->opcode);
5346 if (opcode == HCI_OP_RESET)
5349 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5353 skb_queue_head(&hdev->cmd_q, skb);
5354 queue_work(hdev->workqueue, &hdev->cmd_work);
5357 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5359 hci_req_complete_t req_complete = NULL;
5360 struct sk_buff *skb;
5361 unsigned long flags;
5363 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5365 /* If the completed command doesn't match the last one that was
5366 * sent we need to do special handling of it.
5368 if (!hci_sent_cmd_data(hdev, opcode)) {
5369 /* Some CSR based controllers generate a spontaneous
5370 * reset complete event during init and any pending
5371 * command will never be completed. In such a case we
5372 * need to resend whatever was the last sent
5375 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5376 hci_resend_last(hdev);
5381 /* If the command succeeded and there's still more commands in
5382 * this request the request is not yet complete.
5384 if (!status && !hci_req_is_complete(hdev))
5387 /* If this was the last command in a request the complete
5388 * callback would be found in hdev->sent_cmd instead of the
5389 * command queue (hdev->cmd_q).
5391 if (hdev->sent_cmd) {
5392 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5395 /* We must set the complete callback to NULL to
5396 * avoid calling the callback more than once if
5397 * this function gets called again.
5399 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5405 /* Remove all pending commands belonging to this request */
5406 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5407 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5408 if (bt_cb(skb)->req.start) {
5409 __skb_queue_head(&hdev->cmd_q, skb);
5413 req_complete = bt_cb(skb)->req.complete;
5416 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5420 req_complete(hdev, status);
5423 static void hci_rx_work(struct work_struct *work)
5425 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5426 struct sk_buff *skb;
5428 BT_DBG("%s", hdev->name);
5430 while ((skb = skb_dequeue(&hdev->rx_q))) {
5431 /* Send copy to monitor */
5432 hci_send_to_monitor(hdev, skb);
5434 if (atomic_read(&hdev->promisc)) {
5435 /* Send copy to the sockets */
5436 hci_send_to_sock(hdev, skb);
5439 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5444 if (test_bit(HCI_INIT, &hdev->flags)) {
5445 /* Don't process data packets in this states. */
5446 switch (bt_cb(skb)->pkt_type) {
5447 case HCI_ACLDATA_PKT:
5448 case HCI_SCODATA_PKT:
5455 switch (bt_cb(skb)->pkt_type) {
5457 BT_DBG("%s Event packet", hdev->name);
5458 hci_event_packet(hdev, skb);
5461 case HCI_ACLDATA_PKT:
5462 BT_DBG("%s ACL data packet", hdev->name);
5463 hci_acldata_packet(hdev, skb);
5466 case HCI_SCODATA_PKT:
5467 BT_DBG("%s SCO data packet", hdev->name);
5468 hci_scodata_packet(hdev, skb);
5478 static void hci_cmd_work(struct work_struct *work)
5480 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5481 struct sk_buff *skb;
5483 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5484 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5486 /* Send queued commands */
5487 if (atomic_read(&hdev->cmd_cnt)) {
5488 skb = skb_dequeue(&hdev->cmd_q);
5492 kfree_skb(hdev->sent_cmd);
5494 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5495 if (hdev->sent_cmd) {
5496 atomic_dec(&hdev->cmd_cnt);
5497 hci_send_frame(hdev, skb);
5498 if (test_bit(HCI_RESET, &hdev->flags))
5499 cancel_delayed_work(&hdev->cmd_timer);
5501 schedule_delayed_work(&hdev->cmd_timer,
5504 skb_queue_head(&hdev->cmd_q, skb);
5505 queue_work(hdev->workqueue, &hdev->cmd_work);
5510 void hci_req_add_le_scan_disable(struct hci_request *req)
5512 struct hci_cp_le_set_scan_enable cp;
5514 memset(&cp, 0, sizeof(cp));
5515 cp.enable = LE_SCAN_DISABLE;
5516 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5519 static void add_to_white_list(struct hci_request *req,
5520 struct hci_conn_params *params)
5522 struct hci_cp_le_add_to_white_list cp;
5524 cp.bdaddr_type = params->addr_type;
5525 bacpy(&cp.bdaddr, ¶ms->addr);
5527 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5530 static u8 update_white_list(struct hci_request *req)
5532 struct hci_dev *hdev = req->hdev;
5533 struct hci_conn_params *params;
5534 struct bdaddr_list *b;
5535 uint8_t white_list_entries = 0;
5537 /* Go through the current white list programmed into the
5538 * controller one by one and check if that address is still
5539 * in the list of pending connections or list of devices to
5540 * report. If not present in either list, then queue the
5541 * command to remove it from the controller.
5543 list_for_each_entry(b, &hdev->le_white_list, list) {
5544 struct hci_cp_le_del_from_white_list cp;
5546 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5547 &b->bdaddr, b->bdaddr_type) ||
5548 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5549 &b->bdaddr, b->bdaddr_type)) {
5550 white_list_entries++;
5554 cp.bdaddr_type = b->bdaddr_type;
5555 bacpy(&cp.bdaddr, &b->bdaddr);
5557 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5561 /* Since all no longer valid white list entries have been
5562 * removed, walk through the list of pending connections
5563 * and ensure that any new device gets programmed into
5566 * If the list of the devices is larger than the list of
5567 * available white list entries in the controller, then
5568 * just abort and return filer policy value to not use the
5571 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5572 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5573 ¶ms->addr, params->addr_type))
5576 if (white_list_entries >= hdev->le_white_list_size) {
5577 /* Select filter policy to accept all advertising */
5581 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
5582 params->addr_type)) {
5583 /* White list can not be used with RPAs */
5587 white_list_entries++;
5588 add_to_white_list(req, params);
5591 /* After adding all new pending connections, walk through
5592 * the list of pending reports and also add these to the
5593 * white list if there is still space.
5595 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5596 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5597 ¶ms->addr, params->addr_type))
5600 if (white_list_entries >= hdev->le_white_list_size) {
5601 /* Select filter policy to accept all advertising */
5605 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
5606 params->addr_type)) {
5607 /* White list can not be used with RPAs */
5611 white_list_entries++;
5612 add_to_white_list(req, params);
5615 /* Select filter policy to use white list */
5619 void hci_req_add_le_passive_scan(struct hci_request *req)
5621 struct hci_cp_le_set_scan_param param_cp;
5622 struct hci_cp_le_set_scan_enable enable_cp;
5623 struct hci_dev *hdev = req->hdev;
5627 /* Set require_privacy to false since no SCAN_REQ are send
5628 * during passive scanning. Not using an unresolvable address
5629 * here is important so that peer devices using direct
5630 * advertising with our address will be correctly reported
5631 * by the controller.
5633 if (hci_update_random_address(req, false, &own_addr_type))
5636 /* Adding or removing entries from the white list must
5637 * happen before enabling scanning. The controller does
5638 * not allow white list modification while scanning.
5640 filter_policy = update_white_list(req);
5642 /* When the controller is using random resolvable addresses and
5643 * with that having LE privacy enabled, then controllers with
5644 * Extended Scanner Filter Policies support can now enable support
5645 * for handling directed advertising.
5647 * So instead of using filter polices 0x00 (no whitelist)
5648 * and 0x01 (whitelist enabled) use the new filter policies
5649 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
5651 if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
5652 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
5653 filter_policy |= 0x02;
5655 memset(¶m_cp, 0, sizeof(param_cp));
5656 param_cp.type = LE_SCAN_PASSIVE;
5657 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5658 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5659 param_cp.own_address_type = own_addr_type;
5660 param_cp.filter_policy = filter_policy;
5661 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5664 memset(&enable_cp, 0, sizeof(enable_cp));
5665 enable_cp.enable = LE_SCAN_ENABLE;
5666 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5667 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5671 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5674 BT_DBG("HCI request failed to update background scanning: "
5675 "status 0x%2.2x", status);
5678 /* This function controls the background scanning based on hdev->pend_le_conns
5679 * list. If there are pending LE connection we start the background scanning,
5680 * otherwise we stop it.
5682 * This function requires the caller holds hdev->lock.
5684 void hci_update_background_scan(struct hci_dev *hdev)
5686 struct hci_request req;
5687 struct hci_conn *conn;
5690 if (!test_bit(HCI_UP, &hdev->flags) ||
5691 test_bit(HCI_INIT, &hdev->flags) ||
5692 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5693 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5694 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5695 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5698 /* No point in doing scanning if LE support hasn't been enabled */
5699 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5702 /* If discovery is active don't interfere with it */
5703 if (hdev->discovery.state != DISCOVERY_STOPPED)
5706 /* Reset RSSI and UUID filters when starting background scanning
5707 * since these filters are meant for service discovery only.
5709 * The Start Discovery and Start Service Discovery operations
5710 * ensure to set proper values for RSSI threshold and UUID
5711 * filter list. So it is safe to just reset them here.
5713 hci_discovery_filter_clear(hdev);
5715 hci_req_init(&req, hdev);
5717 if (list_empty(&hdev->pend_le_conns) &&
5718 list_empty(&hdev->pend_le_reports)) {
5719 /* If there is no pending LE connections or devices
5720 * to be scanned for, we should stop the background
5724 /* If controller is not scanning we are done. */
5725 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5728 hci_req_add_le_scan_disable(&req);
5730 BT_DBG("%s stopping background scanning", hdev->name);
5732 /* If there is at least one pending LE connection, we should
5733 * keep the background scan running.
5736 /* If controller is connecting, we should not start scanning
5737 * since some controllers are not able to scan and connect at
5740 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5744 /* If controller is currently scanning, we stop it to ensure we
5745 * don't miss any advertising (due to duplicates filter).
5747 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5748 hci_req_add_le_scan_disable(&req);
5750 hci_req_add_le_passive_scan(&req);
5752 BT_DBG("%s starting background scanning", hdev->name);
5755 err = hci_req_run(&req, update_background_scan_complete);
5757 BT_ERR("Failed to run HCI request: err %d", err);
5760 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5762 struct bdaddr_list *b;
5764 list_for_each_entry(b, &hdev->whitelist, list) {
5765 struct hci_conn *conn;
5767 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5771 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5778 void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5782 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5785 if (!hdev_is_powered(hdev))
5788 if (mgmt_powering_down(hdev))
5791 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
5792 disconnected_whitelist_entries(hdev))
5795 scan = SCAN_DISABLED;
5797 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5800 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5801 scan |= SCAN_INQUIRY;
5804 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5806 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);