2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

/* States of a synchronous HCI request (hdev->req_status) */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

/* Serialize synchronous requests against a single controller */
#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
66 /* ---- HCI notifications ---- */
/* Forward a device event (register/unregister, up/down) to HCI sockets. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
73 /* ---- HCI debugfs entries ---- */
75 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
78 struct hci_dev *hdev = file->private_data;
81 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 size_t count, loff_t *ppos)
90 struct hci_dev *hdev = file->private_data;
93 size_t buf_size = min(count, (sizeof(buf)-1));
97 if (!test_bit(HCI_UP, &hdev->flags))
100 if (copy_from_user(buf, user_buf, buf_size))
103 buf[buf_size] = '\0';
104 if (strtobool(buf, &enable))
107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
112 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
115 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
117 hci_req_unlock(hdev);
122 err = -bt_to_errno(skb->data[0]);
128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
133 static const struct file_operations dut_mode_fops = {
135 .read = dut_mode_read,
136 .write = dut_mode_write,
137 .llseek = default_llseek,
140 static int features_show(struct seq_file *f, void *ptr)
142 struct hci_dev *hdev = f->private;
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
161 hci_dev_unlock(hdev);
166 static int features_open(struct inode *inode, struct file *file)
168 return single_open(file, features_show, inode->i_private);
171 static const struct file_operations features_fops = {
172 .open = features_open,
175 .release = single_release,
178 static int blacklist_show(struct seq_file *f, void *p)
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
184 list_for_each_entry(b, &hdev->blacklist, list)
185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
186 hci_dev_unlock(hdev);
191 static int blacklist_open(struct inode *inode, struct file *file)
193 return single_open(file, blacklist_show, inode->i_private);
196 static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
200 .release = single_release,
203 static int uuids_show(struct seq_file *f, void *p)
205 struct hci_dev *hdev = f->private;
206 struct bt_uuid *uuid;
209 list_for_each_entry(uuid, &hdev->uuids, list) {
212 /* The Bluetooth UUID values are stored in big endian,
213 * but with reversed byte order. So convert them into
214 * the right order for the %pUb modifier.
216 for (i = 0; i < 16; i++)
217 val[i] = uuid->uuid[15 - i];
219 seq_printf(f, "%pUb\n", val);
221 hci_dev_unlock(hdev);
226 static int uuids_open(struct inode *inode, struct file *file)
228 return single_open(file, uuids_show, inode->i_private);
231 static const struct file_operations uuids_fops = {
235 .release = single_release,
238 static int inquiry_cache_show(struct seq_file *f, void *p)
240 struct hci_dev *hdev = f->private;
241 struct discovery_state *cache = &hdev->discovery;
242 struct inquiry_entry *e;
246 list_for_each_entry(e, &cache->all, all) {
247 struct inquiry_data *data = &e->data;
248 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
250 data->pscan_rep_mode, data->pscan_period_mode,
251 data->pscan_mode, data->dev_class[2],
252 data->dev_class[1], data->dev_class[0],
253 __le16_to_cpu(data->clock_offset),
254 data->rssi, data->ssp_mode, e->timestamp);
257 hci_dev_unlock(hdev);
262 static int inquiry_cache_open(struct inode *inode, struct file *file)
264 return single_open(file, inquiry_cache_show, inode->i_private);
267 static const struct file_operations inquiry_cache_fops = {
268 .open = inquiry_cache_open,
271 .release = single_release,
274 static int link_keys_show(struct seq_file *f, void *ptr)
276 struct hci_dev *hdev = f->private;
277 struct list_head *p, *n;
280 list_for_each_safe(p, n, &hdev->link_keys) {
281 struct link_key *key = list_entry(p, struct link_key, list);
282 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
283 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
285 hci_dev_unlock(hdev);
290 static int link_keys_open(struct inode *inode, struct file *file)
292 return single_open(file, link_keys_show, inode->i_private);
295 static const struct file_operations link_keys_fops = {
296 .open = link_keys_open,
299 .release = single_release,
302 static int dev_class_show(struct seq_file *f, void *ptr)
304 struct hci_dev *hdev = f->private;
307 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
308 hdev->dev_class[1], hdev->dev_class[0]);
309 hci_dev_unlock(hdev);
314 static int dev_class_open(struct inode *inode, struct file *file)
316 return single_open(file, dev_class_show, inode->i_private);
319 static const struct file_operations dev_class_fops = {
320 .open = dev_class_open,
323 .release = single_release,
326 static int voice_setting_get(void *data, u64 *val)
328 struct hci_dev *hdev = data;
331 *val = hdev->voice_setting;
332 hci_dev_unlock(hdev);
337 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
338 NULL, "0x%4.4llx\n");
340 static int auto_accept_delay_set(void *data, u64 val)
342 struct hci_dev *hdev = data;
345 hdev->auto_accept_delay = val;
346 hci_dev_unlock(hdev);
351 static int auto_accept_delay_get(void *data, u64 *val)
353 struct hci_dev *hdev = data;
356 *val = hdev->auto_accept_delay;
357 hci_dev_unlock(hdev);
362 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
363 auto_accept_delay_set, "%llu\n");
365 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
366 size_t count, loff_t *ppos)
368 struct hci_dev *hdev = file->private_data;
371 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
374 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
377 static ssize_t force_sc_support_write(struct file *file,
378 const char __user *user_buf,
379 size_t count, loff_t *ppos)
381 struct hci_dev *hdev = file->private_data;
383 size_t buf_size = min(count, (sizeof(buf)-1));
386 if (test_bit(HCI_UP, &hdev->flags))
389 if (copy_from_user(buf, user_buf, buf_size))
392 buf[buf_size] = '\0';
393 if (strtobool(buf, &enable))
396 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
399 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
404 static const struct file_operations force_sc_support_fops = {
406 .read = force_sc_support_read,
407 .write = force_sc_support_write,
408 .llseek = default_llseek,
411 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
412 size_t count, loff_t *ppos)
414 struct hci_dev *hdev = file->private_data;
417 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
420 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
423 static const struct file_operations sc_only_mode_fops = {
425 .read = sc_only_mode_read,
426 .llseek = default_llseek,
429 static int idle_timeout_set(void *data, u64 val)
431 struct hci_dev *hdev = data;
433 if (val != 0 && (val < 500 || val > 3600000))
437 hdev->idle_timeout = val;
438 hci_dev_unlock(hdev);
443 static int idle_timeout_get(void *data, u64 *val)
445 struct hci_dev *hdev = data;
448 *val = hdev->idle_timeout;
449 hci_dev_unlock(hdev);
454 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
455 idle_timeout_set, "%llu\n");
457 static int rpa_timeout_set(void *data, u64 val)
459 struct hci_dev *hdev = data;
461 /* Require the RPA timeout to be at least 30 seconds and at most
464 if (val < 30 || val > (60 * 60 * 24))
468 hdev->rpa_timeout = val;
469 hci_dev_unlock(hdev);
474 static int rpa_timeout_get(void *data, u64 *val)
476 struct hci_dev *hdev = data;
479 *val = hdev->rpa_timeout;
480 hci_dev_unlock(hdev);
485 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
486 rpa_timeout_set, "%llu\n");
488 static int sniff_min_interval_set(void *data, u64 val)
490 struct hci_dev *hdev = data;
492 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
496 hdev->sniff_min_interval = val;
497 hci_dev_unlock(hdev);
502 static int sniff_min_interval_get(void *data, u64 *val)
504 struct hci_dev *hdev = data;
507 *val = hdev->sniff_min_interval;
508 hci_dev_unlock(hdev);
513 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
514 sniff_min_interval_set, "%llu\n");
516 static int sniff_max_interval_set(void *data, u64 val)
518 struct hci_dev *hdev = data;
520 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
524 hdev->sniff_max_interval = val;
525 hci_dev_unlock(hdev);
530 static int sniff_max_interval_get(void *data, u64 *val)
532 struct hci_dev *hdev = data;
535 *val = hdev->sniff_max_interval;
536 hci_dev_unlock(hdev);
541 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
542 sniff_max_interval_set, "%llu\n");
544 static int conn_info_min_age_set(void *data, u64 val)
546 struct hci_dev *hdev = data;
548 if (val == 0 || val > hdev->conn_info_max_age)
552 hdev->conn_info_min_age = val;
553 hci_dev_unlock(hdev);
558 static int conn_info_min_age_get(void *data, u64 *val)
560 struct hci_dev *hdev = data;
563 *val = hdev->conn_info_min_age;
564 hci_dev_unlock(hdev);
569 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
570 conn_info_min_age_set, "%llu\n");
572 static int conn_info_max_age_set(void *data, u64 val)
574 struct hci_dev *hdev = data;
576 if (val == 0 || val < hdev->conn_info_min_age)
580 hdev->conn_info_max_age = val;
581 hci_dev_unlock(hdev);
586 static int conn_info_max_age_get(void *data, u64 *val)
588 struct hci_dev *hdev = data;
591 *val = hdev->conn_info_max_age;
592 hci_dev_unlock(hdev);
597 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
598 conn_info_max_age_set, "%llu\n");
600 static int identity_show(struct seq_file *f, void *p)
602 struct hci_dev *hdev = f->private;
608 hci_copy_identity_address(hdev, &addr, &addr_type);
610 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
611 16, hdev->irk, &hdev->rpa);
613 hci_dev_unlock(hdev);
618 static int identity_open(struct inode *inode, struct file *file)
620 return single_open(file, identity_show, inode->i_private);
623 static const struct file_operations identity_fops = {
624 .open = identity_open,
627 .release = single_release,
630 static int random_address_show(struct seq_file *f, void *p)
632 struct hci_dev *hdev = f->private;
635 seq_printf(f, "%pMR\n", &hdev->random_addr);
636 hci_dev_unlock(hdev);
641 static int random_address_open(struct inode *inode, struct file *file)
643 return single_open(file, random_address_show, inode->i_private);
646 static const struct file_operations random_address_fops = {
647 .open = random_address_open,
650 .release = single_release,
653 static int static_address_show(struct seq_file *f, void *p)
655 struct hci_dev *hdev = f->private;
658 seq_printf(f, "%pMR\n", &hdev->static_addr);
659 hci_dev_unlock(hdev);
664 static int static_address_open(struct inode *inode, struct file *file)
666 return single_open(file, static_address_show, inode->i_private);
669 static const struct file_operations static_address_fops = {
670 .open = static_address_open,
673 .release = single_release,
676 static ssize_t force_static_address_read(struct file *file,
677 char __user *user_buf,
678 size_t count, loff_t *ppos)
680 struct hci_dev *hdev = file->private_data;
683 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
686 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
689 static ssize_t force_static_address_write(struct file *file,
690 const char __user *user_buf,
691 size_t count, loff_t *ppos)
693 struct hci_dev *hdev = file->private_data;
695 size_t buf_size = min(count, (sizeof(buf)-1));
698 if (test_bit(HCI_UP, &hdev->flags))
701 if (copy_from_user(buf, user_buf, buf_size))
704 buf[buf_size] = '\0';
705 if (strtobool(buf, &enable))
708 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
711 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
716 static const struct file_operations force_static_address_fops = {
718 .read = force_static_address_read,
719 .write = force_static_address_write,
720 .llseek = default_llseek,
723 static int white_list_show(struct seq_file *f, void *ptr)
725 struct hci_dev *hdev = f->private;
726 struct bdaddr_list *b;
729 list_for_each_entry(b, &hdev->le_white_list, list)
730 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
731 hci_dev_unlock(hdev);
736 static int white_list_open(struct inode *inode, struct file *file)
738 return single_open(file, white_list_show, inode->i_private);
741 static const struct file_operations white_list_fops = {
742 .open = white_list_open,
745 .release = single_release,
748 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
750 struct hci_dev *hdev = f->private;
754 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
755 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
756 &irk->bdaddr, irk->addr_type,
757 16, irk->val, &irk->rpa);
764 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
766 return single_open(file, identity_resolving_keys_show,
770 static const struct file_operations identity_resolving_keys_fops = {
771 .open = identity_resolving_keys_open,
774 .release = single_release,
777 static int long_term_keys_show(struct seq_file *f, void *ptr)
779 struct hci_dev *hdev = f->private;
783 list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
784 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
785 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
786 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
787 __le64_to_cpu(ltk->rand), 16, ltk->val);
793 static int long_term_keys_open(struct inode *inode, struct file *file)
795 return single_open(file, long_term_keys_show, inode->i_private);
798 static const struct file_operations long_term_keys_fops = {
799 .open = long_term_keys_open,
802 .release = single_release,
805 static int conn_min_interval_set(void *data, u64 val)
807 struct hci_dev *hdev = data;
809 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
813 hdev->le_conn_min_interval = val;
814 hci_dev_unlock(hdev);
819 static int conn_min_interval_get(void *data, u64 *val)
821 struct hci_dev *hdev = data;
824 *val = hdev->le_conn_min_interval;
825 hci_dev_unlock(hdev);
830 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
831 conn_min_interval_set, "%llu\n");
833 static int conn_max_interval_set(void *data, u64 val)
835 struct hci_dev *hdev = data;
837 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
841 hdev->le_conn_max_interval = val;
842 hci_dev_unlock(hdev);
847 static int conn_max_interval_get(void *data, u64 *val)
849 struct hci_dev *hdev = data;
852 *val = hdev->le_conn_max_interval;
853 hci_dev_unlock(hdev);
858 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
859 conn_max_interval_set, "%llu\n");
861 static int conn_latency_set(void *data, u64 val)
863 struct hci_dev *hdev = data;
869 hdev->le_conn_latency = val;
870 hci_dev_unlock(hdev);
875 static int conn_latency_get(void *data, u64 *val)
877 struct hci_dev *hdev = data;
880 *val = hdev->le_conn_latency;
881 hci_dev_unlock(hdev);
886 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
887 conn_latency_set, "%llu\n");
889 static int supervision_timeout_set(void *data, u64 val)
891 struct hci_dev *hdev = data;
893 if (val < 0x000a || val > 0x0c80)
897 hdev->le_supv_timeout = val;
898 hci_dev_unlock(hdev);
903 static int supervision_timeout_get(void *data, u64 *val)
905 struct hci_dev *hdev = data;
908 *val = hdev->le_supv_timeout;
909 hci_dev_unlock(hdev);
914 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
915 supervision_timeout_set, "%llu\n");
917 static int adv_channel_map_set(void *data, u64 val)
919 struct hci_dev *hdev = data;
921 if (val < 0x01 || val > 0x07)
925 hdev->le_adv_channel_map = val;
926 hci_dev_unlock(hdev);
931 static int adv_channel_map_get(void *data, u64 *val)
933 struct hci_dev *hdev = data;
936 *val = hdev->le_adv_channel_map;
937 hci_dev_unlock(hdev);
942 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
943 adv_channel_map_set, "%llu\n");
945 static int adv_min_interval_set(void *data, u64 val)
947 struct hci_dev *hdev = data;
949 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
953 hdev->le_adv_min_interval = val;
954 hci_dev_unlock(hdev);
959 static int adv_min_interval_get(void *data, u64 *val)
961 struct hci_dev *hdev = data;
964 *val = hdev->le_adv_min_interval;
965 hci_dev_unlock(hdev);
970 DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
971 adv_min_interval_set, "%llu\n");
973 static int adv_max_interval_set(void *data, u64 val)
975 struct hci_dev *hdev = data;
977 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
981 hdev->le_adv_max_interval = val;
982 hci_dev_unlock(hdev);
987 static int adv_max_interval_get(void *data, u64 *val)
989 struct hci_dev *hdev = data;
992 *val = hdev->le_adv_max_interval;
993 hci_dev_unlock(hdev);
998 DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
999 adv_max_interval_set, "%llu\n");
1001 static int device_list_show(struct seq_file *f, void *ptr)
1003 struct hci_dev *hdev = f->private;
1004 struct hci_conn_params *p;
1005 struct bdaddr_list *b;
1008 list_for_each_entry(b, &hdev->whitelist, list)
1009 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
1010 list_for_each_entry(p, &hdev->le_conn_params, list) {
1011 seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
1014 hci_dev_unlock(hdev);
1019 static int device_list_open(struct inode *inode, struct file *file)
1021 return single_open(file, device_list_show, inode->i_private);
1024 static const struct file_operations device_list_fops = {
1025 .open = device_list_open,
1027 .llseek = seq_lseek,
1028 .release = single_release,
1031 /* ---- HCI requests ---- */
1033 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1035 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1037 if (hdev->req_status == HCI_REQ_PEND) {
1038 hdev->req_result = result;
1039 hdev->req_status = HCI_REQ_DONE;
1040 wake_up_interruptible(&hdev->req_wait_q);
1044 static void hci_req_cancel(struct hci_dev *hdev, int err)
1046 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1048 if (hdev->req_status == HCI_REQ_PEND) {
1049 hdev->req_result = err;
1050 hdev->req_status = HCI_REQ_CANCELED;
1051 wake_up_interruptible(&hdev->req_wait_q);
1055 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1058 struct hci_ev_cmd_complete *ev;
1059 struct hci_event_hdr *hdr;
1060 struct sk_buff *skb;
1064 skb = hdev->recv_evt;
1065 hdev->recv_evt = NULL;
1067 hci_dev_unlock(hdev);
1070 return ERR_PTR(-ENODATA);
1072 if (skb->len < sizeof(*hdr)) {
1073 BT_ERR("Too short HCI event");
1077 hdr = (void *) skb->data;
1078 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1081 if (hdr->evt != event)
1086 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1087 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1091 if (skb->len < sizeof(*ev)) {
1092 BT_ERR("Too short cmd_complete event");
1096 ev = (void *) skb->data;
1097 skb_pull(skb, sizeof(*ev));
1099 if (opcode == __le16_to_cpu(ev->opcode))
1102 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1103 __le16_to_cpu(ev->opcode));
1107 return ERR_PTR(-ENODATA);
1110 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1111 const void *param, u8 event, u32 timeout)
1113 DECLARE_WAITQUEUE(wait, current);
1114 struct hci_request req;
1117 BT_DBG("%s", hdev->name);
1119 hci_req_init(&req, hdev);
1121 hci_req_add_ev(&req, opcode, plen, param, event);
1123 hdev->req_status = HCI_REQ_PEND;
1125 add_wait_queue(&hdev->req_wait_q, &wait);
1126 set_current_state(TASK_INTERRUPTIBLE);
1128 err = hci_req_run(&req, hci_req_sync_complete);
1130 remove_wait_queue(&hdev->req_wait_q, &wait);
1131 return ERR_PTR(err);
1134 schedule_timeout(timeout);
1136 remove_wait_queue(&hdev->req_wait_q, &wait);
1138 if (signal_pending(current))
1139 return ERR_PTR(-EINTR);
1141 switch (hdev->req_status) {
1143 err = -bt_to_errno(hdev->req_result);
1146 case HCI_REQ_CANCELED:
1147 err = -hdev->req_result;
1155 hdev->req_status = hdev->req_result = 0;
1157 BT_DBG("%s end: err %d", hdev->name, err);
1160 return ERR_PTR(err);
1162 return hci_get_cmd_complete(hdev, opcode, event);
1164 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1166 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1167 const void *param, u32 timeout)
1169 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1171 EXPORT_SYMBOL(__hci_cmd_sync);
1173 /* Execute request and wait for completion. */
1174 static int __hci_req_sync(struct hci_dev *hdev,
1175 void (*func)(struct hci_request *req,
1177 unsigned long opt, __u32 timeout)
1179 struct hci_request req;
1180 DECLARE_WAITQUEUE(wait, current);
1183 BT_DBG("%s start", hdev->name);
1185 hci_req_init(&req, hdev);
1187 hdev->req_status = HCI_REQ_PEND;
1191 add_wait_queue(&hdev->req_wait_q, &wait);
1192 set_current_state(TASK_INTERRUPTIBLE);
1194 err = hci_req_run(&req, hci_req_sync_complete);
1196 hdev->req_status = 0;
1198 remove_wait_queue(&hdev->req_wait_q, &wait);
1200 /* ENODATA means the HCI request command queue is empty.
1201 * This can happen when a request with conditionals doesn't
1202 * trigger any commands to be sent. This is normal behavior
1203 * and should not trigger an error return.
1205 if (err == -ENODATA)
1211 schedule_timeout(timeout);
1213 remove_wait_queue(&hdev->req_wait_q, &wait);
1215 if (signal_pending(current))
1218 switch (hdev->req_status) {
1220 err = -bt_to_errno(hdev->req_result);
1223 case HCI_REQ_CANCELED:
1224 err = -hdev->req_result;
1232 hdev->req_status = hdev->req_result = 0;
1234 BT_DBG("%s end: err %d", hdev->name, err);
1239 static int hci_req_sync(struct hci_dev *hdev,
1240 void (*req)(struct hci_request *req,
1242 unsigned long opt, __u32 timeout)
1246 if (!test_bit(HCI_UP, &hdev->flags))
1249 /* Serialize all requests */
1251 ret = __hci_req_sync(hdev, req, opt, timeout);
1252 hci_req_unlock(hdev);
1257 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1259 BT_DBG("%s %ld", req->hdev->name, opt);
1262 set_bit(HCI_RESET, &req->hdev->flags);
1263 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1266 static void bredr_init(struct hci_request *req)
1268 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1270 /* Read Local Supported Features */
1271 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1273 /* Read Local Version */
1274 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1276 /* Read BD Address */
1277 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1280 static void amp_init(struct hci_request *req)
1282 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1284 /* Read Local Version */
1285 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1287 /* Read Local Supported Commands */
1288 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1290 /* Read Local Supported Features */
1291 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1293 /* Read Local AMP Info */
1294 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1296 /* Read Data Blk size */
1297 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1299 /* Read Flow Control Mode */
1300 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1302 /* Read Location Data */
1303 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1306 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1308 struct hci_dev *hdev = req->hdev;
1310 BT_DBG("%s %ld", hdev->name, opt);
1313 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1314 hci_reset_req(req, 0);
1316 switch (hdev->dev_type) {
1326 BT_ERR("Unknown device type %d", hdev->dev_type);
1331 static void bredr_setup(struct hci_request *req)
1333 struct hci_dev *hdev = req->hdev;
1338 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1339 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1341 /* Read Class of Device */
1342 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1344 /* Read Local Name */
1345 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1347 /* Read Voice Setting */
1348 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1350 /* Read Number of Supported IAC */
1351 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1353 /* Read Current IAC LAP */
1354 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1356 /* Clear Event Filters */
1357 flt_type = HCI_FLT_CLEAR_ALL;
1358 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1360 /* Connection accept timeout ~20 secs */
1361 param = cpu_to_le16(0x7d00);
1362 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1364 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1365 * but it does not support page scan related HCI commands.
1367 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1368 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1369 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1373 static void le_setup(struct hci_request *req)
1375 struct hci_dev *hdev = req->hdev;
1377 /* Read LE Buffer Size */
1378 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1380 /* Read LE Local Supported Features */
1381 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1383 /* Read LE Supported States */
1384 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1386 /* Read LE White List Size */
1387 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1389 /* Clear LE White List */
1390 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1392 /* LE-only controllers have LE implicitly enabled */
1393 if (!lmp_bredr_capable(hdev))
1394 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1397 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1399 if (lmp_ext_inq_capable(hdev))
1402 if (lmp_inq_rssi_capable(hdev))
1405 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1406 hdev->lmp_subver == 0x0757)
1409 if (hdev->manufacturer == 15) {
1410 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1412 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1414 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1418 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1419 hdev->lmp_subver == 0x1805)
1425 static void hci_setup_inquiry_mode(struct hci_request *req)
1429 mode = hci_get_inquiry_mode(req->hdev);
1431 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1434 static void hci_setup_event_mask(struct hci_request *req)
1436 struct hci_dev *hdev = req->hdev;
1438 /* The second byte is 0xff instead of 0x9f (two reserved bits
1439 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1440 * command otherwise.
1442 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1444 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1445 * any event mask for pre 1.2 devices.
1447 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1450 if (lmp_bredr_capable(hdev)) {
1451 events[4] |= 0x01; /* Flow Specification Complete */
1452 events[4] |= 0x02; /* Inquiry Result with RSSI */
1453 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1454 events[5] |= 0x08; /* Synchronous Connection Complete */
1455 events[5] |= 0x10; /* Synchronous Connection Changed */
1457 /* Use a different default for LE-only devices */
1458 memset(events, 0, sizeof(events));
1459 events[0] |= 0x10; /* Disconnection Complete */
1460 events[1] |= 0x08; /* Read Remote Version Information Complete */
1461 events[1] |= 0x20; /* Command Complete */
1462 events[1] |= 0x40; /* Command Status */
1463 events[1] |= 0x80; /* Hardware Error */
1464 events[2] |= 0x04; /* Number of Completed Packets */
1465 events[3] |= 0x02; /* Data Buffer Overflow */
1467 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1468 events[0] |= 0x80; /* Encryption Change */
1469 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1473 if (lmp_inq_rssi_capable(hdev))
1474 events[4] |= 0x02; /* Inquiry Result with RSSI */
1476 if (lmp_sniffsubr_capable(hdev))
1477 events[5] |= 0x20; /* Sniff Subrating */
1479 if (lmp_pause_enc_capable(hdev))
1480 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1482 if (lmp_ext_inq_capable(hdev))
1483 events[5] |= 0x40; /* Extended Inquiry Result */
1485 if (lmp_no_flush_capable(hdev))
1486 events[7] |= 0x01; /* Enhanced Flush Complete */
1488 if (lmp_lsto_capable(hdev))
1489 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1491 if (lmp_ssp_capable(hdev)) {
1492 events[6] |= 0x01; /* IO Capability Request */
1493 events[6] |= 0x02; /* IO Capability Response */
1494 events[6] |= 0x04; /* User Confirmation Request */
1495 events[6] |= 0x08; /* User Passkey Request */
1496 events[6] |= 0x10; /* Remote OOB Data Request */
1497 events[6] |= 0x20; /* Simple Pairing Complete */
1498 events[7] |= 0x04; /* User Passkey Notification */
1499 events[7] |= 0x08; /* Keypress Notification */
1500 events[7] |= 0x10; /* Remote Host Supported
1501 * Features Notification
1505 if (lmp_le_capable(hdev))
1506 events[7] |= 0x20; /* LE Meta-Event */
1508 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Stage 2 of controller initialization: queue BR/EDR- and SSP-related
 * configuration commands onto @req based on the controller's LMP
 * feature bits.
 * NOTE(review): this extraction is line-sampled; the bredr/le setup
 * call sites and several closing braces are not visible here.
 */
1511 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1513 struct hci_dev *hdev = req->hdev;
1515 if (lmp_bredr_capable(hdev))
1518 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1520 if (lmp_le_capable(hdev))
1523 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1524 * local supported commands HCI command.
1526 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1527 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1529 if (lmp_ssp_capable(hdev)) {
1530 /* When SSP is available, then the host features page
1531 * should also be available as well. However some
1532 * controllers list the max_page as 0 as long as SSP
1533 * has not been enabled. To achieve proper debugging
1534 * output, force the minimum max_page to 1 at least.
1536 hdev->max_page = 0x01;
1538 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
/* SSP enabled by host: write SSP mode to the controller. */
1540 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1541 sizeof(mode), &mode);
/* Otherwise (branch header not visible in this view) the EIR data
 * is cleared, presumably because SSP is required for EIR usage.
 */
1543 struct hci_cp_write_eir cp;
1545 memset(hdev->eir, 0, sizeof(hdev->eir));
1546 memset(&cp, 0, sizeof(cp));
1548 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1552 if (lmp_inq_rssi_capable(hdev))
1553 hci_setup_inquiry_mode(req);
1555 if (lmp_inq_tx_pwr_capable(hdev))
1556 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1558 if (lmp_ext_feat_capable(hdev)) {
1559 struct hci_cp_read_local_ext_features cp;
/* Read extended features page (page number set on a line not
 * visible here).
 */
1562 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1566 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1568 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Build the default link policy mask from the controller's LMP
 * capabilities (role switch, hold, sniff, park) and queue a
 * Write Default Link Policy Settings command onto @req.
 */
1573 static void hci_setup_link_policy(struct hci_request *req)
1575 struct hci_dev *hdev = req->hdev;
1576 struct hci_cp_write_def_link_policy cp;
1577 u16 link_policy = 0;
1579 if (lmp_rswitch_capable(hdev))
1580 link_policy |= HCI_LP_RSWITCH;
1581 if (lmp_hold_capable(hdev))
1582 link_policy |= HCI_LP_HOLD;
1583 if (lmp_sniff_capable(hdev))
1584 link_policy |= HCI_LP_SNIFF;
1585 if (lmp_park_capable(hdev))
1586 link_policy |= HCI_LP_PARK;
/* HCI parameters are little-endian on the wire. */
1588 cp.policy = cpu_to_le16(link_policy);
1589 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Queue a Write LE Host Supported command when the host-side LE
 * enable state differs from what the controller currently reports.
 * LE-only controllers are skipped: they have no explicit enablement.
 * NOTE(review): the lines setting cp.le/cp.simul inside the
 * HCI_LE_ENABLED branch are not visible in this sampled view.
 */
1592 static void hci_set_le_support(struct hci_request *req)
1594 struct hci_dev *hdev = req->hdev;
1595 struct hci_cp_write_le_host_supported cp;
1597 /* LE-only devices do not support explicit enablement */
1598 if (!lmp_bredr_capable(hdev))
1601 memset(&cp, 0, sizeof(cp));
1603 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
/* Only send the command if the desired state differs from the
 * controller's current host-LE capability bit.
 */
1608 if (cp.le != lmp_host_le_capable(hdev))
1609 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and queue the second page of the HCI event mask. Bits are
 * enabled for Connectionless Slave Broadcast (master and slave roles)
 * and for Authenticated Payload Timeout Expired, depending on the
 * controller's capabilities.
 */
1613 static void hci_set_event_mask_page_2(struct hci_request *req)
1615 struct hci_dev *hdev = req->hdev;
1616 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1618 /* If Connectionless Slave Broadcast master role is supported
1619 * enable all necessary events for it.
1621 if (lmp_csb_master_capable(hdev)) {
1622 events[1] |= 0x40; /* Triggered Clock Capture */
1623 events[1] |= 0x80; /* Synchronization Train Complete */
1624 events[2] |= 0x10; /* Slave Page Response Timeout */
1625 events[2] |= 0x20; /* CSB Channel Map Change */
1628 /* If Connectionless Slave Broadcast slave role is supported
1629 * enable all necessary events for it.
1631 if (lmp_csb_slave_capable(hdev)) {
1632 events[2] |= 0x01; /* Synchronization Train Received */
1633 events[2] |= 0x02; /* CSB Receive */
1634 events[2] |= 0x04; /* CSB Timeout */
1635 events[2] |= 0x08; /* Truncated Page Complete */
1638 /* Enable Authenticated Payload Timeout Expired event if supported */
1639 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
/* (The events[] bit set for this case is on a line not visible
 * in this sampled view.)
 */
1642 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Stage 3 of controller initialization: set the page-1 event mask,
 * optionally delete stored link keys, configure the default link
 * policy, set up LE (event mask, TX power, host support) and read
 * extended feature pages beyond page 1.
 * NOTE(review): line-sampled view; variable declarations (e.g. the LE
 * events[] array, loop variable p) and some closing braces are not
 * visible here.
 */
1645 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1647 struct hci_dev *hdev = req->hdev;
1650 hci_setup_event_mask(req);
1652 /* Some Broadcom based Bluetooth controllers do not support the
1653 * Delete Stored Link Key command. They are clearly indicating its
1654 * absence in the bit mask of supported commands.
1656 * Check the supported commands and only if the the command is marked
1657 * as supported send it. If not supported assume that the controller
1658 * does not have actual support for stored link keys which makes this
1659 * command redundant anyway.
1661 * Some controllers indicate that they support handling deleting
1662 * stored link keys, but they don't. The quirk lets a driver
1663 * just disable this command.
1665 if (hdev->commands[6] & 0x80 &&
1666 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1667 struct hci_cp_delete_stored_link_key cp;
1669 bacpy(&cp.bdaddr, BDADDR_ANY);
1670 cp.delete_all = 0x01;
1671 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* commands[5] bit 0x10: Write Default Link Policy supported. */
1675 if (hdev->commands[5] & 0x10)
1676 hci_setup_link_policy(req);
1678 if (lmp_le_capable(hdev)) {
1681 memset(events, 0, sizeof(events));
/* LE Long Term Key Request only matters with LE encryption. */
1684 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1685 events[0] |= 0x10; /* LE Long Term Key Request */
1687 /* If controller supports the Connection Parameters Request
1688 * Link Layer Procedure, enable the corresponding event.
1690 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1691 events[0] |= 0x20; /* LE Remote Connection
1695 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
/* commands[25] bit 0x40: LE Read Advertising Channel TX Power. */
1698 if (hdev->commands[25] & 0x40) {
1699 /* Read LE Advertising Channel TX Power */
1700 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1703 hci_set_le_support(req);
1706 /* Read features beyond page 1 if available */
1707 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1708 struct hci_cp_read_local_ext_features cp;
1711 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Stage 4 of controller initialization: event mask page 2, local
 * codec list, MWS transport configuration, synchronization train
 * parameters, and Secure Connections support — each gated on the
 * corresponding supported-commands bit or LMP capability.
 */
1716 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1718 struct hci_dev *hdev = req->hdev;
1720 /* Set event mask page 2 if the HCI command for it is supported */
1721 if (hdev->commands[22] & 0x04)
1722 hci_set_event_mask_page_2(req);
1724 /* Read local codec list if the HCI command is supported */
1725 if (hdev->commands[29] & 0x20)
1726 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1728 /* Get MWS transport configuration if the HCI command is supported */
1729 if (hdev->commands[30] & 0x08)
1730 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1732 /* Check for Synchronization Train support */
1733 if (lmp_sync_train_capable(hdev))
1734 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1736 /* Enable Secure Connections if supported and configured */
1737 if ((lmp_sc_capable(hdev) ||
1738 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1739 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
/* (Declaration of 'support' is on a line not visible here.) */
1741 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1742 sizeof(support), &support);
/* Run the four synchronous HCI init request stages against @hdev and,
 * during the initial HCI_SETUP phase only, create the debugfs entries
 * for the controller. Returns 0 on success or a negative errno from
 * the first failing stage.
 * NOTE(review): line-sampled view; error-check lines after each
 * __hci_req_sync() call and several fops arguments are not visible.
 */
1746 static int __hci_init(struct hci_dev *hdev)
1750 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1754 /* The Device Under Test (DUT) mode is special and available for
1755 * all controller types. So just create it early on.
1757 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1758 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1762 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1763 * BR/EDR/LE type controllers. AMP controllers only need the
/* Non-BR/EDR (AMP) controllers stop after stage 1. */
1766 if (hdev->dev_type != HCI_BREDR)
1769 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1773 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1777 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1781 /* Only create debugfs entries during the initial setup
1782 * phase and not every time the controller gets powered on.
1784 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
/* Generic entries available for every controller type. */
1787 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1789 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1790 &hdev->manufacturer);
1791 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1792 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1793 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1795 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1797 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1799 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1800 &conn_info_min_age_fops);
1801 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1802 &conn_info_max_age_fops);
/* BR/EDR-only entries. */
1804 if (lmp_bredr_capable(hdev)) {
1805 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1806 hdev, &inquiry_cache_fops);
1807 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1808 hdev, &link_keys_fops);
1809 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1810 hdev, &dev_class_fops);
1811 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1812 hdev, &voice_setting_fops);
/* Secure Simple Pairing entries. */
1815 if (lmp_ssp_capable(hdev)) {
1816 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1817 hdev, &auto_accept_delay_fops);
1818 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1819 hdev, &force_sc_support_fops);
1820 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1821 hdev, &sc_only_mode_fops);
/* Sniff-mode tuning entries. */
1824 if (lmp_sniff_capable(hdev)) {
1825 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1826 hdev, &idle_timeout_fops);
1827 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1828 hdev, &sniff_min_interval_fops);
1829 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1830 hdev, &sniff_max_interval_fops);
/* LE-only entries. */
1833 if (lmp_le_capable(hdev)) {
1834 debugfs_create_file("identity", 0400, hdev->debugfs,
1835 hdev, &identity_fops);
1836 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1837 hdev, &rpa_timeout_fops);
1838 debugfs_create_file("random_address", 0444, hdev->debugfs,
1839 hdev, &random_address_fops);
1840 debugfs_create_file("static_address", 0444, hdev->debugfs,
1841 hdev, &static_address_fops);
1843 /* For controllers with a public address, provide a debug
1844 * option to force the usage of the configured static
1845 * address. By default the public address is used.
1847 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1848 debugfs_create_file("force_static_address", 0644,
1849 hdev->debugfs, hdev,
1850 &force_static_address_fops);
1852 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1853 &hdev->le_white_list_size);
1854 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1856 debugfs_create_file("identity_resolving_keys", 0400,
1857 hdev->debugfs, hdev,
1858 &identity_resolving_keys_fops);
1859 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1860 hdev, &long_term_keys_fops);
1861 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1862 hdev, &conn_min_interval_fops);
1863 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1864 hdev, &conn_max_interval_fops);
1865 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1866 hdev, &conn_latency_fops);
1867 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1868 hdev, &supervision_timeout_fops);
1869 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1870 hdev, &adv_channel_map_fops);
1871 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1872 hdev, &adv_min_interval_fops);
1873 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1874 hdev, &adv_max_interval_fops);
1875 debugfs_create_u16("discov_interleaved_timeout", 0644,
1877 &hdev->discov_interleaved_timeout);
/* Minimal init request used for unconfigured controllers: optional
 * reset (unless the RESET_ON_CLOSE quirk says reset happens at close),
 * Read Local Version, and — only when the driver can change the
 * address via set_bdaddr — Read BD Address.
 */
1885 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1887 struct hci_dev *hdev = req->hdev;
1889 BT_DBG("%s %ld", hdev->name, opt);
/* Reset here unless the driver resets on close instead. */
1892 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1893 hci_reset_req(req, 0);
1895 /* Read Local Version */
1896 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1898 /* Read BD Address */
1899 if (hdev->set_bdaddr)
1900 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Run the minimal hci_init0_req sequence synchronously for an
 * unconfigured controller; raw devices are skipped (return value on
 * the skipped path is not visible in this sampled view).
 */
1903 static int __hci_unconf_init(struct hci_dev *hdev)
1907 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1910 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
/* Request helper: write the scan enable setting (inquiry/page scan)
 * taken from @opt. The local 'scan' variable is initialized from opt
 * on a line not visible in this sampled view.
 */
1917 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1921 BT_DBG("%s %x", req->hdev->name, scan);
1923 /* Inquiry and Page scans */
1924 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request helper: write the authentication enable setting taken from
 * @opt ('auth' is initialized from opt on a line not visible here).
 */
1927 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1931 BT_DBG("%s %x", req->hdev->name, auth);
1933 /* Authentication */
1934 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request helper: write the encryption mode taken from @opt
 * ('encrypt' is initialized from opt on a line not visible here).
 */
1937 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1941 BT_DBG("%s %x", req->hdev->name, encrypt);
1944 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request helper: write the default link policy from @opt, converted
 * to the little-endian wire format.
 */
1947 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1949 __le16 policy = cpu_to_le16(opt);
1951 BT_DBG("%s %x", req->hdev->name, policy);
1953 /* Default link policy */
1954 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1957 /* Get HCI device by index.
1958 * Device is held on return. */
/* Walks hci_dev_list under the read lock; the matching device's
 * refcount is taken via hci_dev_hold() before the lock is dropped.
 * Returns NULL if no device has the given index.
 */
1959 struct hci_dev *hci_dev_get(int index)
1961 struct hci_dev *hdev = NULL, *d;
1963 BT_DBG("%d", index);
1968 read_lock(&hci_dev_list_lock);
1969 list_for_each_entry(d, &hci_dev_list, list) {
1970 if (d->id == index) {
1971 hdev = hci_dev_hold(d);
1975 read_unlock(&hci_dev_list_lock);
1979 /* ---- Inquiry support ---- */
/* Return true while a discovery procedure is in progress, i.e. the
 * state machine is in FINDING or RESOLVING (return statements are on
 * lines not visible in this sampled view).
 */
1981 bool hci_discovery_active(struct hci_dev *hdev)
1983 struct discovery_state *discov = &hdev->discovery;
1985 switch (discov->state) {
1986 case DISCOVERY_FINDING:
1987 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit the corresponding
 * mgmt "discovering" events. A no-op when the state is unchanged.
 */
1995 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1997 int old_state = hdev->discovery.state;
1999 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2001 if (old_state == state)
2004 hdev->discovery.state = state;
/* (switch header on a line not visible in this sampled view) */
2007 case DISCOVERY_STOPPED:
/* Discovery no longer owns the radio: re-evaluate background scan. */
2008 hci_update_background_scan(hdev);
/* Suppress the '0' event if discovery never actually started. */
2010 if (old_state != DISCOVERY_STARTING)
2011 mgmt_discovering(hdev, 0);
2013 case DISCOVERY_STARTING:
2015 case DISCOVERY_FINDING:
2016 mgmt_discovering(hdev, 1);
2018 case DISCOVERY_RESOLVING:
2020 case DISCOVERY_STOPPING:
/* Free every entry in the inquiry cache and reinitialize its lists
 * (the kfree/list_del inside the loop and the 'all' list reinit are on
 * lines not visible in this sampled view).
 */
2025 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2027 struct discovery_state *cache = &hdev->discovery;
2028 struct inquiry_entry *p, *n;
2030 list_for_each_entry_safe(p, n, &cache->all, all) {
2035 INIT_LIST_HEAD(&cache->unknown);
2036 INIT_LIST_HEAD(&cache->resolve);
/* Find an inquiry cache entry by Bluetooth address on the 'all' list.
 * Returns the entry or (on a line not visible here) NULL.
 */
2039 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2042 struct discovery_state *cache = &hdev->discovery;
2043 struct inquiry_entry *e;
2045 BT_DBG("cache %p, %pMR", cache, bdaddr);
2047 list_for_each_entry(e, &cache->all, all) {
2048 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry whose remote name is not yet known, searching the
 * 'unknown' list by Bluetooth address.
 */
2055 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2058 struct discovery_state *cache = &hdev->discovery;
2059 struct inquiry_entry *e;
2061 BT_DBG("cache %p, %pMR", cache, bdaddr);
2063 list_for_each_entry(e, &cache->unknown, list) {
2064 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Search the 'resolve' list. With BDADDR_ANY, match the first entry
 * whose name_state equals @state; otherwise match by address.
 */
2071 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2075 struct discovery_state *cache = &hdev->discovery;
2076 struct inquiry_entry *e;
2078 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2080 list_for_each_entry(e, &cache->resolve, list) {
2081 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2083 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert @ie into the 'resolve' list keeping it ordered by
 * descending RSSI magnitude, so stronger (closer) devices get their
 * names resolved first; entries already NAME_PENDING are skipped.
 */
2090 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2091 struct inquiry_entry *ie)
2093 struct discovery_state *cache = &hdev->discovery;
2094 struct list_head *pos = &cache->resolve;
2095 struct inquiry_entry *p;
2097 list_del(&ie->list);
2099 list_for_each_entry(p, &cache->resolve, list) {
2100 if (p->name_state != NAME_PENDING &&
2101 abs(p->data.rssi) >= abs(ie->data.rssi))
/* (insertion-point update inside the loop is not visible here) */
2106 list_add(&ie->list, pos);
/* Insert or refresh an inquiry cache entry for @data and return mgmt
 * "device found" flags (LEGACY_PAIRING / CONFIRM_NAME as applicable).
 * NOTE(review): line-sampled view; the name_known parameter, several
 * conditions and return statements are not visible here.
 */
2109 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2112 struct discovery_state *cache = &hdev->discovery;
2113 struct inquiry_entry *ie;
2116 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* New inquiry data invalidates any stored OOB data for the peer. */
2118 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2120 if (!data->ssp_mode)
2121 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2123 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2125 if (!ie->data.ssp_mode)
2126 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
/* RSSI changed while a name resolve is still needed: reorder the
 * resolve list so the strongest signal is resolved first.
 */
2128 if (ie->name_state == NAME_NEEDED &&
2129 data->rssi != ie->data.rssi) {
2130 ie->data.rssi = data->rssi;
2131 hci_inquiry_cache_update_resolve(hdev, ie);
2137 /* Entry not in the cache. Add new one. */
2138 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2140 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2144 list_add(&ie->all, &cache->all);
2147 ie->name_state = NAME_KNOWN;
2149 ie->name_state = NAME_NOT_KNOWN;
2150 list_add(&ie->list, &cache->unknown);
2154 if (name_known && ie->name_state != NAME_KNOWN &&
2155 ie->name_state != NAME_PENDING) {
2156 ie->name_state = NAME_KNOWN;
2157 list_del(&ie->list);
/* Refresh the stored inquiry data and timestamps in all cases. */
2160 memcpy(&ie->data, data, sizeof(*data));
2161 ie->timestamp = jiffies;
2162 cache->timestamp = jiffies;
2164 if (ie->name_state == NAME_NOT_KNOWN)
2165 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info, returning the number copied ('copied' counter
 * and info pointer advance are on lines not visible here). Called
 * from hci_inquiry() to fill the userspace result buffer.
 */
2171 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2173 struct discovery_state *cache = &hdev->discovery;
2174 struct inquiry_info *info = (struct inquiry_info *) buf;
2175 struct inquiry_entry *e;
2178 list_for_each_entry(e, &cache->all, all) {
2179 struct inquiry_data *data = &e->data;
2184 bacpy(&info->bdaddr, &data->bdaddr);
2185 info->pscan_rep_mode = data->pscan_rep_mode;
2186 info->pscan_period_mode = data->pscan_period_mode;
2187 info->pscan_mode = data->pscan_mode;
2188 memcpy(info->dev_class, data->dev_class, 3);
2189 info->clock_offset = data->clock_offset;
2195 BT_DBG("cache %p, copied %d", cache, copied);
/* Request helper: queue an HCI Inquiry command using the LAP, length
 * and num_rsp from the struct hci_inquiry_req passed through @opt.
 * Skipped when an inquiry is already in progress (HCI_INQUIRY set).
 */
2199 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2201 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2202 struct hci_dev *hdev = req->hdev;
2203 struct hci_cp_inquiry cp;
2205 BT_DBG("%s", hdev->name);
2207 if (test_bit(HCI_INQUIRY, &hdev->flags))
2211 memcpy(&cp.lap, &ir->lap, 3);
2212 cp.length = ir->length;
2213 cp.num_rsp = ir->num_rsp;
2214 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* HCIINQUIRY ioctl backend: validate the device state, optionally
 * flush a stale cache and run a new inquiry, then copy the cached
 * results back to userspace.
 * NOTE(review): line-sampled view; several error-path gotos, lock
 * calls and the buf/timeo declarations are not visible here.
 */
2217 int hci_inquiry(void __user *arg)
2219 __u8 __user *ptr = arg;
2220 struct hci_inquiry_req ir;
2221 struct hci_dev *hdev;
2222 int err = 0, do_inquiry = 0, max_rsp;
2226 if (copy_from_user(&ir, ptr, sizeof(ir)))
2229 hdev = hci_dev_get(ir.dev_id);
/* Reject devices claimed by a user channel, unconfigured devices,
 * non-BR/EDR devices and devices with BR/EDR disabled.
 */
2233 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2238 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2243 if (hdev->dev_type != HCI_BREDR) {
2248 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Flush the cache when it is too old, empty, or a flush was
 * explicitly requested via IREQ_CACHE_FLUSH.
 */
2254 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2255 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2256 hci_inquiry_cache_flush(hdev);
2259 hci_dev_unlock(hdev);
/* ir.length is in units of 1.28s per HCI; 2000ms per unit is used
 * here as a generous timeout for the synchronous request.
 */
2261 timeo = ir.length * msecs_to_jiffies(2000);
2264 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2269 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2270 * cleared). If it is interrupted by a signal, return -EINTR.
2272 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2273 TASK_INTERRUPTIBLE))
2277 /* for unlimited number of responses we will use buffer with
2280 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2282 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2283 * copy it to the user space.
2285 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2292 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2293 hci_dev_unlock(hdev);
2295 BT_DBG("num_rsp %d", ir.num_rsp);
/* Write back the updated request header first, then the results. */
2297 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2299 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Power on a controller: open the transport, run the driver setup
 * and HCI init sequences, then mark the device HCI_UP and notify
 * mgmt. On any init failure, tear everything back down.
 * NOTE(review): line-sampled view; the hci_req_lock call, several
 * gotos/labels and close/flush calls on the error path are missing
 * from this view.
 */
2312 static int hci_dev_do_open(struct hci_dev *hdev)
2316 BT_DBG("%s %p", hdev->name, hdev);
2320 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
/* Past the SETUP/CONFIG stages the rfkill and address checks below
 * become hard failures rather than deferred conditions.
 */
2325 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2326 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2327 /* Check for rfkill but allow the HCI setup stage to
2328 * proceed (which in itself doesn't cause any RF activity).
2330 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2335 /* Check for valid public address or a configured static
2336 * random adddress, but let the HCI setup proceed to
2337 * be able to determine if there is a public address
2340 * In case of user channel usage, it is not important
2341 * if a public address or static random address is
2344 * This check is only valid for BR/EDR controllers
2345 * since AMP controllers do not have an address.
2347 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2348 hdev->dev_type == HCI_BREDR &&
2349 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2350 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2351 ret = -EADDRNOTAVAIL;
/* Already up: nothing to do (early-out path). */
2356 if (test_bit(HCI_UP, &hdev->flags)) {
2361 if (hdev->open(hdev)) {
2366 atomic_set(&hdev->cmd_cnt, 1);
2367 set_bit(HCI_INIT, &hdev->flags);
2369 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
/* Driver-specific setup runs once, only during initial setup. */
2371 ret = hdev->setup(hdev);
2373 /* The transport driver can set these quirks before
2374 * creating the HCI device or in its setup callback.
2376 * In case any of them is set, the controller has to
2377 * start up as unconfigured.
2379 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2380 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2381 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2383 /* For an unconfigured controller it is required to
2384 * read at least the version information provided by
2385 * the Read Local Version Information command.
2387 * If the set_bdaddr driver callback is provided, then
2388 * also the original Bluetooth public device address
2389 * will be read using the Read BD Address command.
2391 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2392 ret = __hci_unconf_init(hdev);
2395 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2396 /* If public address change is configured, ensure that
2397 * the address gets programmed. If the driver does not
2398 * support changing the public address, fail the power
2401 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2403 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2405 ret = -EADDRNOTAVAIL;
/* Full HCI init only for configured, non-user-channel devices. */
2409 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2410 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2411 ret = __hci_init(hdev);
2414 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark up, notify, and let mgmt know when powered. */
2418 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2419 set_bit(HCI_UP, &hdev->flags);
2420 hci_notify(hdev, HCI_DEV_UP);
2421 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2422 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2423 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2424 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2425 hdev->dev_type == HCI_BREDR) {
2427 mgmt_powered(hdev, 1);
2428 hci_dev_unlock(hdev);
2431 /* Init failed, cleanup */
2432 flush_work(&hdev->tx_work);
2433 flush_work(&hdev->cmd_work);
2434 flush_work(&hdev->rx_work);
2436 skb_queue_purge(&hdev->cmd_q);
2437 skb_queue_purge(&hdev->rx_q);
2442 if (hdev->sent_cmd) {
2443 kfree_skb(hdev->sent_cmd);
2444 hdev->sent_cmd = NULL;
/* Keep only the HCI_RAW bit across the failed open. */
2448 hdev->flags &= BIT(HCI_RAW);
2452 hci_req_unlock(hdev);
2456 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl backend: resolve the device, enforce the
 * unconfigured/user-channel rules, cancel pending auto-power-off
 * work, then call hci_dev_do_open(). The hci_dev_put() on exit is on
 * a line not visible in this sampled view.
 */
2458 int hci_dev_open(__u16 dev)
2460 struct hci_dev *hdev;
2463 hdev = hci_dev_get(dev);
2467 /* Devices that are marked as unconfigured can only be powered
2468 * up as user channel. Trying to bring them up as normal devices
2469 * will result into a failure. Only user channel operation is
2472 * When this function is called for a user channel, the flag
2473 * HCI_USER_CHANNEL will be set first before attempting to
2476 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2477 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2482 /* We need to ensure that no other power on/off work is pending
2483 * before proceeding to call hci_dev_do_open. This is
2484 * particularly important if the setup procedure has not yet
2487 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2488 cancel_delayed_work(&hdev->power_off);
2490 /* After this call it is guaranteed that the setup procedure
2491 * has finished. This means that error conditions like RFKILL
2492 * or no valid public or static random address apply.
2494 flush_workqueue(hdev->req_workqueue);
2496 /* For controllers not using the management interface and that
2497 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2498 * so that pairing works for them. Once the management interface
2499 * is in use this bit will be cleared again and userspace has
2500 * to explicitly enable it.
2502 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2503 !test_bit(HCI_MGMT, &hdev->dev_flags))
2504 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2506 err = hci_dev_do_open(hdev);
2513 /* This function requires the caller holds hdev->lock */
/* Drop the reference/connection held by each pending LE connection
 * parameter entry and remove it from the action list (the p->conn
 * check and NULL assignment are on lines not visible here).
 */
2514 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2516 struct hci_conn_params *p;
2518 list_for_each_entry(p, &hdev->le_conn_params, list) {
2520 hci_conn_drop(p->conn);
2521 hci_conn_put(p->conn);
2524 list_del_init(&p->action);
2527 BT_DBG("All LE pending actions cleared");
/* Power off a controller: cancel pending work, flush queues and
 * caches, optionally send a final HCI Reset, notify mgmt of the
 * power-down, and clear volatile state (EIR, device class, random
 * address).
 * NOTE(review): line-sampled view; hci_req_lock, hdev->close(), the
 * discovery-state reset and hci_dev_lock pairs are not fully visible.
 */
2530 static int hci_dev_do_close(struct hci_dev *hdev)
2532 BT_DBG("%s %p", hdev->name, hdev);
2534 cancel_delayed_work(&hdev->power_off);
2536 hci_req_cancel(hdev, ENODEV);
/* Already down: just make sure the command timer is stopped. */
2539 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2540 cancel_delayed_work_sync(&hdev->cmd_timer);
2541 hci_req_unlock(hdev);
2545 /* Flush RX and TX works */
2546 flush_work(&hdev->tx_work);
2547 flush_work(&hdev->rx_work);
2549 if (hdev->discov_timeout > 0) {
2550 cancel_delayed_work(&hdev->discov_off);
2551 hdev->discov_timeout = 0;
2552 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2553 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2556 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2557 cancel_delayed_work(&hdev->service_cache);
2559 cancel_delayed_work_sync(&hdev->le_scan_disable);
2561 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2562 cancel_delayed_work_sync(&hdev->rpa_expired);
2564 /* Avoid potential lockdep warnings from the *_flush() calls by
2565 * ensuring the workqueue is empty up front.
2567 drain_workqueue(hdev->workqueue);
2570 hci_inquiry_cache_flush(hdev);
2571 hci_pend_le_actions_clear(hdev);
2572 hci_conn_hash_flush(hdev);
2573 hci_dev_unlock(hdev);
2575 hci_notify(hdev, HCI_DEV_DOWN);
2581 skb_queue_purge(&hdev->cmd_q);
2582 atomic_set(&hdev->cmd_cnt, 1);
/* Send a final HCI Reset if the driver requested reset-on-close and
 * the device is in a state where a reset is meaningful.
 */
2583 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2584 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2585 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2586 set_bit(HCI_INIT, &hdev->flags);
2587 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2588 clear_bit(HCI_INIT, &hdev->flags);
2591 /* flush cmd work */
2592 flush_work(&hdev->cmd_work);
2595 skb_queue_purge(&hdev->rx_q);
2596 skb_queue_purge(&hdev->cmd_q);
2597 skb_queue_purge(&hdev->raw_q);
2599 /* Drop last sent command */
2600 if (hdev->sent_cmd) {
2601 cancel_delayed_work_sync(&hdev->cmd_timer);
2602 kfree_skb(hdev->sent_cmd);
2603 hdev->sent_cmd = NULL;
2606 kfree_skb(hdev->recv_evt);
2607 hdev->recv_evt = NULL;
2609 /* After this point our queues are empty
2610 * and no tasks are scheduled. */
/* Keep HCI_RAW only; drop all non-persistent dev_flags. */
2614 hdev->flags &= BIT(HCI_RAW);
2615 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2617 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2618 if (hdev->dev_type == HCI_BREDR) {
2620 mgmt_powered(hdev, 0);
2621 hci_dev_unlock(hdev);
2625 /* Controller radio is available but is currently powered down */
2626 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2628 memset(hdev->eir, 0, sizeof(hdev->eir));
2629 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2630 bacpy(&hdev->random_addr, BDADDR_ANY);
2632 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl backend: resolve the device, refuse if it is owned
 * by a user channel, cancel a pending auto-power-off, then run
 * hci_dev_do_close(). The trailing hci_dev_put() is on a line not
 * visible in this sampled view.
 */
2638 int hci_dev_close(__u16 dev)
2640 struct hci_dev *hdev;
2643 hdev = hci_dev_get(dev);
2647 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2652 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2653 cancel_delayed_work(&hdev->power_off);
2655 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl backend: flush queues, caches and connections on
 * a device that is up, reset the flow-control counters and issue a
 * synchronous HCI Reset.
 * NOTE(review): line-sampled view; the hci_req_lock/hci_dev_lock
 * calls and error-path gotos are not visible here.
 */
2662 int hci_dev_reset(__u16 dev)
2664 struct hci_dev *hdev;
2667 hdev = hci_dev_get(dev);
/* Reject devices that are down, user-channel owned or unconfigured. */
2673 if (!test_bit(HCI_UP, &hdev->flags)) {
2678 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2683 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2689 skb_queue_purge(&hdev->rx_q);
2690 skb_queue_purge(&hdev->cmd_q);
2692 /* Avoid potential lockdep warnings from the *_flush() calls by
2693 * ensuring the workqueue is empty up front.
2695 drain_workqueue(hdev->workqueue);
2698 hci_inquiry_cache_flush(hdev);
2699 hci_conn_hash_flush(hdev);
2700 hci_dev_unlock(hdev);
/* Reset flow control: one command slot, no outstanding packets. */
2705 atomic_set(&hdev->cmd_cnt, 1);
2706 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2708 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2711 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl backend: zero the device statistics, refusing
 * user-channel-owned and unconfigured devices.
 */
2716 int hci_dev_reset_stat(__u16 dev)
2718 struct hci_dev *hdev;
2721 hdev = hci_dev_get(dev);
2725 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2730 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2735 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Mirror a legacy ioctl scan-enable change into the mgmt-visible
 * CONNECTABLE/DISCOVERABLE flags, and emit new-settings / adv-data
 * updates when anything actually changed and mgmt is in use.
 */
2742 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2744 bool conn_changed, discov_changed;
2746 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2748 if ((scan & SCAN_PAGE))
2749 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2752 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2755 if ((scan & SCAN_INQUIRY)) {
2756 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
/* Turning inquiry scan off also clears limited discoverability. */
2759 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2760 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
/* Without mgmt in use there is nobody to notify. */
2764 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2767 if (conn_changed || discov_changed) {
2768 /* In case this was disabled through mgmt */
2769 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2771 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2772 mgmt_update_adv_data(hdev);
2774 mgmt_new_settings(hdev);
/* Dispatcher for the legacy per-device ioctls (HCISETAUTH, HCISETENCRYPT,
 * HCISETSCAN, HCISETLINKPOL, HCISETLINKMODE, HCISETPTYPE, HCISETACLMTU,
 * HCISETSCOMTU), running the matching synchronous request or updating
 * hdev fields directly.
 * NOTE(review): line-sampled view; the switch header, case labels for
 * several commands, break statements and the trailing hci_dev_put()
 * are not visible here.
 */
2778 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2780 struct hci_dev *hdev;
2781 struct hci_dev_req dr;
2784 if (copy_from_user(&dr, arg, sizeof(dr)))
2787 hdev = hci_dev_get(dr.dev_id);
2791 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2796 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2801 if (hdev->dev_type != HCI_BREDR) {
2806 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2813 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2818 if (!lmp_encrypt_capable(hdev)) {
2823 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2824 /* Auth must be enabled first */
2825 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2831 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2836 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2839 /* Ensure that the connectable and discoverable states
2840 * get correctly modified as this was a non-mgmt change.
2843 hci_update_scan_state(hdev, dr.dev_opt);
2847 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2851 case HCISETLINKMODE:
2852 hdev->link_mode = ((__u16) dr.dev_opt) &
2853 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2857 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high 16 bits, packet count in the low. */
2861 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2862 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2866 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2867 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl backend: copy (id, flags) pairs for up to
 * dev_num registered devices into a kernel buffer and then out to
 * userspace. The dev_num declaration, n increment and kfree are on
 * lines not visible in this sampled view.
 */
2880 int hci_get_dev_list(void __user *arg)
2882 struct hci_dev *hdev;
2883 struct hci_dev_list_req *dl;
2884 struct hci_dev_req *dr;
2885 int n = 0, size, err;
2888 if (get_user(dev_num, (__u16 __user *) arg))
/* Bound the request so the kzalloc below stays reasonable. */
2891 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2894 size = sizeof(*dl) + dev_num * sizeof(*dr);
2896 dl = kzalloc(size, GFP_KERNEL);
2902 read_lock(&hci_dev_list_lock);
2903 list_for_each_entry(hdev, &hci_dev_list, list) {
2904 unsigned long flags = hdev->flags;
2906 /* When the auto-off is configured it means the transport
2907 * is running, but in that case still indicate that the
2908 * device is actually down.
2910 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2911 flags &= ~BIT(HCI_UP);
2913 (dr + n)->dev_id = hdev->id;
2914 (dr + n)->dev_opt = flags;
2919 read_unlock(&hci_dev_list_lock);
/* Shrink to the number of devices actually found. */
2922 size = sizeof(*dl) + n * sizeof(*dr);
2924 err = copy_to_user(arg, dl, size);
2927 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info snapshot
 * for the requested device and copy it to userspace. LE-only devices
 * report their LE MTU/packet counts in the ACL fields.
 */
2930 int hci_get_dev_info(void __user *arg)
2932 struct hci_dev *hdev;
2933 struct hci_dev_info di;
2934 unsigned long flags;
2937 if (copy_from_user(&di, arg, sizeof(di)))
2940 hdev = hci_dev_get(di.dev_id);
2944 /* When the auto-off is configured it means the transport
2945 * is running, but in that case still indicate that the
2946 * device is actually down.
2948 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2949 flags = hdev->flags & ~BIT(HCI_UP);
2951 flags = hdev->flags;
2953 strcpy(di.name, hdev->name);
2954 di.bdaddr = hdev->bdaddr;
/* Pack bus type (low nibble) and device type into one byte. */
2955 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2957 di.pkt_type = hdev->pkt_type;
2958 if (lmp_bredr_capable(hdev)) {
2959 di.acl_mtu = hdev->acl_mtu;
2960 di.acl_pkts = hdev->acl_pkts;
2961 di.sco_mtu = hdev->sco_mtu;
2962 di.sco_pkts = hdev->sco_pkts;
/* LE-only: reuse the ACL fields for the LE buffer parameters. */
2964 di.acl_mtu = hdev->le_mtu;
2965 di.acl_pkts = hdev->le_pkts;
2969 di.link_policy = hdev->link_policy;
2970 di.link_mode = hdev->link_mode;
2972 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2973 memcpy(&di.features, &hdev->features, sizeof(di.features));
2975 if (copy_to_user(arg, &di, sizeof(di)))
2983 /* ---- Interface to HCI drivers ---- */
2983 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: mark the controller rfkilled and power it down, unless
 * a user channel owns it or it is still in SETUP/CONFIG (in which case the
 * power-on path checks HCI_RFKILLED itself and closes the device later).
 */
2985 static int hci_rfkill_set_block(void *data, bool blocked)
2987 struct hci_dev *hdev = data;
2989 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
/* User channel has exclusive access; do not interfere. */
2991 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2995 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2996 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2997 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2998 hci_dev_do_close(hdev);
3000 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations for HCI controllers; only soft-block is handled. */
3006 static const struct rfkill_ops hci_rfkill_ops = {
3007 .set_block = hci_rfkill_set_block,
/* Work item that powers on a controller. Re-checks the error conditions
 * (rfkill, unconfigured, missing address) that are ignored during HCI
 * setup, arms the auto-off timer, and sends the appropriate mgmt
 * Index Added event on the SETUP->ready or CONFIG->ready transition.
 */
3010 static void hci_power_on(struct work_struct *work)
3012 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3015 BT_DBG("%s", hdev->name);
3017 err = hci_dev_do_open(hdev);
3019 mgmt_set_powered_failed(hdev, err);
3023 /* During the HCI setup phase, a few error conditions are
3024 * ignored and they need to be checked now. If they are still
3025 * valid, it is important to turn the device back off.
/* A BR/EDR controller with neither a public nor a static address
 * cannot operate; treat that like rfkill/unconfigured and close it.
 */
3027 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3028 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3029 (hdev->dev_type == HCI_BREDR &&
3030 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3031 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3032 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3033 hci_dev_do_close(hdev);
3034 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3035 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3036 HCI_AUTO_OFF_TIMEOUT);
3039 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3040 /* For unconfigured devices, set the HCI_RAW flag
3041 * so that userspace can easily identify them.
3043 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3044 set_bit(HCI_RAW, &hdev->flags);
3046 /* For fully configured devices, this will send
3047 * the Index Added event. For unconfigured devices,
3048 * it will send Unconfigued Index Added event.
3050 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3051 * and no event will be send.
3053 mgmt_index_added(hdev);
3054 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3055 /* When the controller is now configured, then it
3056 * is important to clear the HCI_RAW flag.
3058 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3059 clear_bit(HCI_RAW, &hdev->flags);
3061 /* Powering on the controller with HCI_CONFIG set only
3062 * happens with the transition from unconfigured to
3063 * configured. This will send the Index Added event.
3065 mgmt_index_added(hdev);
/* Delayed work item that powers the controller off (auto-off timeout,
 * mgmt power-off, etc.) by closing the device.
 */
3069 static void hci_power_off(struct work_struct *work)
3071 struct hci_dev *hdev = container_of(work, struct hci_dev,
3074 BT_DBG("%s", hdev->name);
3076 hci_dev_do_close(hdev);
/* Delayed work item fired when the discoverable timeout expires; lets the
 * mgmt layer turn discoverable mode back off.
 */
3079 static void hci_discov_off(struct work_struct *work)
3081 struct hci_dev *hdev;
3083 hdev = container_of(work, struct hci_dev, discov_off.work);
3085 BT_DBG("%s", hdev->name);
3087 mgmt_discoverable_timeout(hdev);
/* Free every registered service UUID on the controller. */
3090 void hci_uuids_clear(struct hci_dev *hdev)
3092 struct bt_uuid *uuid, *tmp;
3094 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3095 list_del(&uuid->list);
/* Free every stored BR/EDR link key. */
3100 void hci_link_keys_clear(struct hci_dev *hdev)
3102 struct list_head *p, *n;
3104 list_for_each_safe(p, n, &hdev->link_keys) {
3105 struct link_key *key;
3107 key = list_entry(p, struct link_key, list);
/* Remove every SMP Long Term Key; entries are unlinked under RCU rules. */
3114 void hci_smp_ltks_clear(struct hci_dev *hdev)
3118 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3119 list_del_rcu(&k->list);
/* Remove every SMP Identity Resolving Key; entries are unlinked under RCU
 * rules.
 */
3124 void hci_smp_irks_clear(struct hci_dev *hdev)
3128 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3129 list_del_rcu(&k->list);
/* Look up the stored BR/EDR link key for @bdaddr; NULL-on-miss behavior is
 * implied by callers but the return path is outside this elided view.
 */
3134 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3138 list_for_each_entry(k, &hdev->link_keys, list)
3139 if (bacmp(bdaddr, &k->bdaddr) == 0)
3145 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3146 u8 key_type, u8 old_key_type)
3149 if (key_type < 0x03)
3152 /* Debug keys are insecure so don't store them persistently */
3153 if (key_type == HCI_LK_DEBUG_COMBINATION)
3156 /* Changed combination key and there's no previous one */
3157 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3160 /* Security mode 3 case */
3164 /* Neither local nor remote side had no-bonding as requirement */
3165 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3168 /* Local side had dedicated bonding as requirement */
3169 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3172 /* Remote side had dedicated bonding as requirement */
3173 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3176 /* If none of the above criteria match, then don't store the key
/* Map an SMP LTK type to the HCI role it is valid for: SMP_LTK keys are
 * used as master, everything else as slave.
 */
3181 static u8 ltk_role(u8 type)
3183 if (type == SMP_LTK)
3184 return HCI_ROLE_MASTER;
3186 return HCI_ROLE_SLAVE;
/* Look up an LTK by its EDIV/Rand pair, additionally matching the role the
 * key may be used in.
 */
3189 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3195 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3196 if (k->ediv != ediv || k->rand != rand)
3199 if (ltk_role(k->type) != role)
/* Look up an LTK by peer address + address type, matching the role. */
3210 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3211 u8 addr_type, u8 role)
3216 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3217 if (addr_type == k->bdaddr_type &&
3218 bacmp(bdaddr, &k->bdaddr) == 0 &&
3219 ltk_role(k->type) == role) {
/* Resolve a Resolvable Private Address to its IRK. First try the cached
 * RPA of each IRK; if that misses, run the crypto check and cache the RPA
 * on a match so the next lookup is cheap.
 */
3229 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3231 struct smp_irk *irk;
3234 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3235 if (!bacmp(&irk->rpa, rpa)) {
3241 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3242 if (smp_irk_matches(hdev, irk->val, rpa)) {
3243 bacpy(&irk->rpa, rpa);
/* Look up an IRK by its identity address. Random identity addresses must
 * be static random (two most significant bits set).
 */
3253 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3256 struct smp_irk *irk;
3258 /* Identity Address must be public or static random */
3259 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3263 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3264 if (addr_type == irk->addr_type &&
3265 bacmp(bdaddr, &irk->bdaddr) == 0) {
/* Store (or update) a BR/EDR link key for @bdaddr. On success optionally
 * reports via *persistent whether the key should be kept across reboots
 * (see hci_persistent_key()).
 */
3275 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3276 bdaddr_t *bdaddr, u8 *val, u8 type,
3277 u8 pin_len, bool *persistent)
3279 struct link_key *key, *old_key;
3282 old_key = hci_find_link_key(hdev, bdaddr);
3284 old_key_type = old_key->type;
/* No previous key: 0xff marks "no old key" for the logic below. */
3287 old_key_type = conn ? conn->key_type : 0xff;
3288 key = kzalloc(sizeof(*key), GFP_KERNEL);
3291 list_add(&key->list, &hdev->link_keys);
3294 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3296 /* Some buggy controller combinations generate a changed
3297 * combination key for legacy pairing even when there's no
3299 if (type == HCI_LK_CHANGED_COMBINATION &&
3300 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3301 type = HCI_LK_COMBINATION;
3303 conn->key_type = type;
3306 bacpy(&key->bdaddr, bdaddr);
3307 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3308 key->pin_len = pin_len;
/* Keep the old type so a changed-combination key stays classified as
 * its original kind.
 */
3310 if (type == HCI_LK_CHANGED_COMBINATION)
3311 key->type = old_key_type;
3316 *persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an SMP Long Term Key for the given identity address,
 * keyed additionally by the role derived from @type.
 */
3322 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3323 u8 addr_type, u8 type, u8 authenticated,
3324 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3326 struct smp_ltk *key, *old_key;
3327 u8 role = ltk_role(type);
3329 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3333 key = kzalloc(sizeof(*key), GFP_KERNEL);
3336 list_add_rcu(&key->list, &hdev->long_term_keys);
3339 bacpy(&key->bdaddr, bdaddr);
3340 key->bdaddr_type = addr_type;
3341 memcpy(key->val, tk, sizeof(key->val));
3342 key->authenticated = authenticated;
3345 key->enc_size = enc_size;
/* Store (or update) an Identity Resolving Key, remembering the last RPA it
 * resolved so hci_find_irk_by_rpa() can hit the cache.
 */
3351 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3352 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3354 struct smp_irk *irk;
3356 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3358 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3362 bacpy(&irk->bdaddr, bdaddr);
3363 irk->addr_type = addr_type;
3365 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
3368 memcpy(irk->val, val, 16);
3369 bacpy(&irk->rpa, rpa);
/* Delete the stored BR/EDR link key for @bdaddr, if any. */
3374 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3376 struct link_key *key;
3378 key = hci_find_link_key(hdev, bdaddr);
3382 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3384 list_del(&key->list);
/* Delete all LTKs matching @bdaddr/@bdaddr_type (there may be one per
 * role). Returns 0 when at least one key was removed, -ENOENT otherwise.
 */
3390 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3395 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3396 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3399 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3401 list_del_rcu(&k->list);
3406 return removed ? 0 : -ENOENT;
/* Delete the IRK stored for the given identity address, if any. */
3409 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3413 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3414 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3417 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3419 list_del_rcu(&k->list);
3424 /* HCI command timer function */
/* Fires when a sent HCI command got no completion in time: log which
 * opcode timed out (if the command skb is still around), then force the
 * command counter to 1 so the cmd_work can push the next queued command.
 */
3425 static void hci_cmd_timeout(struct work_struct *work)
3427 struct hci_dev *hdev = container_of(work, struct hci_dev,
3430 if (hdev->sent_cmd) {
3431 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3432 u16 opcode = __le16_to_cpu(sent->opcode);
3434 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3436 BT_ERR("%s command tx timeout", hdev->name);
3439 atomic_set(&hdev->cmd_cnt, 1);
3440 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up the stored remote out-of-band pairing data for @bdaddr. */
3443 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3446 struct oob_data *data;
3448 list_for_each_entry(data, &hdev->remote_oob_data, list)
3449 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Delete the stored remote OOB data for @bdaddr, if any. */
3455 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3457 struct oob_data *data;
3459 data = hci_find_remote_oob_data(hdev, bdaddr);
3463 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3465 list_del(&data->list);
/* Free every stored remote OOB data entry. */
3471 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3473 struct oob_data *data, *n;
3475 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3476 list_del(&data->list);
/* Store (or update) P-192 remote OOB data for @bdaddr; the P-256 fields
 * are zeroed since this legacy variant provides none.
 */
3481 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3484 struct oob_data *data;
3486 data = hci_find_remote_oob_data(hdev, bdaddr);
3488 data = kmalloc(sizeof(*data), GFP_KERNEL);
3492 bacpy(&data->bdaddr, bdaddr);
3493 list_add(&data->list, &hdev->remote_oob_data);
3496 memcpy(data->hash192, hash, sizeof(data->hash192));
3497 memcpy(data->rand192, rand, sizeof(data->rand192));
3499 memset(data->hash256, 0, sizeof(data->hash256));
3500 memset(data->rand256, 0, sizeof(data->rand256));
3502 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Store (or update) extended remote OOB data carrying both the P-192 and
 * P-256 (Secure Connections) hash/randomizer pairs.
 */
3507 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3508 u8 *hash192, u8 *rand192,
3509 u8 *hash256, u8 *rand256)
3511 struct oob_data *data;
3513 data = hci_find_remote_oob_data(hdev, bdaddr);
3515 data = kmalloc(sizeof(*data), GFP_KERNEL);
3519 bacpy(&data->bdaddr, bdaddr);
3520 list_add(&data->list, &hdev->remote_oob_data);
3523 memcpy(data->hash192, hash192, sizeof(data->hash192));
3524 memcpy(data->rand192, rand192, sizeof(data->rand192));
3526 memcpy(data->hash256, hash256, sizeof(data->hash256));
3527 memcpy(data->rand256, rand256, sizeof(data->rand256));
3529 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Find an entry matching @bdaddr + @type on a generic bdaddr list
 * (blacklist, whitelist, LE white list, ...).
 */
3534 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3535 bdaddr_t *bdaddr, u8 type)
3537 struct bdaddr_list *b;
3539 list_for_each_entry(b, bdaddr_list, list) {
3540 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Free every entry on a generic bdaddr list. */
3547 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3549 struct list_head *p, *n;
3551 list_for_each_safe(p, n, bdaddr_list) {
3552 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add @bdaddr + @type to a generic bdaddr list. BDADDR_ANY is rejected
 * and duplicates are not added twice.
 */
3559 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3561 struct bdaddr_list *entry;
3563 if (!bacmp(bdaddr, BDADDR_ANY))
3566 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3569 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3573 bacpy(&entry->bdaddr, bdaddr);
3574 entry->bdaddr_type = type;
3576 list_add(&entry->list, list);
/* Remove @bdaddr + @type from a generic bdaddr list; BDADDR_ANY acts as a
 * wildcard that clears the whole list.
 */
3581 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3583 struct bdaddr_list *entry;
3585 if (!bacmp(bdaddr, BDADDR_ANY)) {
3586 hci_bdaddr_list_clear(list);
3590 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3594 list_del(&entry->list);
3600 /* This function requires the caller holds hdev->lock */
/* Find the LE connection parameters stored for an identity address. */
3601 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3602 bdaddr_t *addr, u8 addr_type)
3604 struct hci_conn_params *params;
3606 /* The conn params list only contains identity addresses */
3607 if (!hci_is_identity_address(addr, addr_type))
3610 list_for_each_entry(params, &hdev->le_conn_params, list) {
3611 if (bacmp(&params->addr, addr) == 0 &&
3612 params->addr_type == addr_type) {
/* Check whether an LE link to @addr/@type exists and is fully connected
 * (state BT_CONNECTED).
 */
3620 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3622 struct hci_conn *conn;
3624 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3628 if (conn->dst_type != type)
3631 if (conn->state != BT_CONNECTED)
3637 /* This function requires the caller holds hdev->lock */
/* Find conn params on a pending-action list (pend_le_conns or
 * pend_le_reports); iteration is over the 'action' link, not 'list'.
 */
3638 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3639 bdaddr_t *addr, u8 addr_type)
3641 struct hci_conn_params *param;
3643 /* The list only contains identity addresses */
3644 if (!hci_is_identity_address(addr, addr_type))
3647 list_for_each_entry(param, list, action) {
3648 if (bacmp(&param->addr, addr) == 0 &&
3649 param->addr_type == addr_type)
3656 /* This function requires the caller holds hdev->lock */
/* Get-or-create the LE connection parameters entry for an identity
 * address; a new entry is seeded with the controller-wide defaults and
 * auto-connect disabled.
 */
3657 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3658 bdaddr_t *addr, u8 addr_type)
3660 struct hci_conn_params *params;
3662 if (!hci_is_identity_address(addr, addr_type))
3665 params = hci_conn_params_lookup(hdev, addr, addr_type);
3669 params = kzalloc(sizeof(*params), GFP_KERNEL);
3671 BT_ERR("Out of memory");
3675 bacpy(&params->addr, addr);
3676 params->addr_type = addr_type;
3678 list_add(&params->list, &hdev->le_conn_params);
3679 INIT_LIST_HEAD(&params->action);
3681 params->conn_min_interval = hdev->le_conn_min_interval;
3682 params->conn_max_interval = hdev->le_conn_max_interval;
3683 params->conn_latency = hdev->le_conn_latency;
3684 params->supervision_timeout = hdev->le_supv_timeout;
3685 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3687 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3692 /* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy for an identity address, moving the entry
 * between the pending-connection / pending-report action lists and
 * re-triggering background scanning as needed.
 */
3693 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3696 struct hci_conn_params *params;
3698 params = hci_conn_params_add(hdev, addr, addr_type);
3702 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry was on before. */
3705 list_del_init(&params->action);
3707 switch (auto_connect) {
3708 case HCI_AUTO_CONN_DISABLED:
3709 case HCI_AUTO_CONN_LINK_LOSS:
3710 hci_update_background_scan(hdev);
3712 case HCI_AUTO_CONN_REPORT:
3713 list_add(&params->action, &hdev->pend_le_reports);
3714 hci_update_background_scan(hdev);
3716 case HCI_AUTO_CONN_DIRECT:
3717 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection if one is not already up. */
3718 if (!is_connected(hdev, addr, addr_type)) {
3719 list_add(&params->action, &hdev->pend_le_conns);
3720 hci_update_background_scan(hdev);
3725 params->auto_connect = auto_connect;
3727 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Release a conn_params entry: drop the connection reference it may hold,
 * unlink it from both the action and main lists, and free it.
 */
3733 static void hci_conn_params_free(struct hci_conn_params *params)
3736 hci_conn_drop(params->conn);
3737 hci_conn_put(params->conn);
3740 list_del(&params->action);
3741 list_del(&params->list);
3745 /* This function requires the caller holds hdev->lock */
/* Delete the conn_params entry for an identity address and refresh
 * background scanning to account for the removal.
 */
3746 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3748 struct hci_conn_params *params;
3750 params = hci_conn_params_lookup(hdev, addr, addr_type);
3754 hci_conn_params_free(params);
3756 hci_update_background_scan(hdev);
3758 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3761 /* This function requires the caller holds hdev->lock */
/* Drop every conn_params entry whose auto-connect policy is DISABLED;
 * entries with any active policy are kept.
 */
3762 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3764 struct hci_conn_params *params, *tmp;
3766 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3767 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3769 list_del(&params->list);
3773 BT_DBG("All LE disabled connection parameters were removed");
3776 /* This function requires the caller holds hdev->lock */
/* Drop every conn_params entry and refresh background scanning. */
3777 void hci_conn_params_clear_all(struct hci_dev *hdev)
3779 struct hci_conn_params *params, *tmp;
3781 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3782 hci_conn_params_free(params);
3784 hci_update_background_scan(hdev);
3786 BT_DBG("All LE connection parameters were removed");
/* Request callback for the follow-up inquiry of interleaved discovery: on
 * failure log it and mark discovery as stopped.
 */
3789 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3792 BT_ERR("Failed to start inquiry: status %d", status);
3795 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3796 hci_dev_unlock(hdev);
/* Completion handler after LE scanning was disabled. For LE-only
 * discovery, discovery is finished; for interleaved discovery, start the
 * BR/EDR inquiry phase with the GIAC access code.
 */
3801 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3803 /* General inquiry access code (GIAC) */
3804 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3805 struct hci_request req;
3806 struct hci_cp_inquiry cp;
3810 BT_ERR("Failed to disable LE scanning: status %d", status);
3814 switch (hdev->discovery.type) {
3815 case DISCOV_TYPE_LE:
3817 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3818 hci_dev_unlock(hdev);
3821 case DISCOV_TYPE_INTERLEAVED:
3822 hci_req_init(&req, hdev);
3824 memset(&cp, 0, sizeof(cp));
3825 memcpy(&cp.lap, lap, sizeof(cp.lap));
3826 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3827 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3831 hci_inquiry_cache_flush(hdev);
3833 err = hci_req_run(&req, inquiry_complete);
3835 BT_ERR("Inquiry request failed: err %d", err);
3836 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3839 hci_dev_unlock(hdev);
/* Delayed work item that builds and runs the request to disable LE
 * scanning; le_scan_disable_work_complete() continues from there.
 */
3844 static void le_scan_disable_work(struct work_struct *work)
3846 struct hci_dev *hdev = container_of(work, struct hci_dev,
3847 le_scan_disable.work);
3848 struct hci_request req;
3851 BT_DBG("%s", hdev->name);
3853 hci_req_init(&req, hdev);
3855 hci_req_add_le_scan_disable(&req);
3857 err = hci_req_run(&req, le_scan_disable_work_complete);
3859 BT_ERR("Disable LE scanning request failed: err %d", err);
/* Queue an LE Set Random Address command for @rpa, unless advertising or
 * an LE connection attempt is in progress — then flag HCI_RPA_EXPIRED so
 * the update happens on the next cycle instead.
 */
3862 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3864 struct hci_dev *hdev = req->hdev;
3866 /* If we're advertising or initiating an LE connection we can't
3867 * go ahead and change the random address at this time. This is
3868 * because the eventual initiator address used for the
3869 * subsequently created connection will be undefined (some
3870 * controllers use the new address and others the one we had
3871 * when the operation started).
3873 * In this kind of scenario skip the update and let the random
3874 * address be updated at the next cycle.
3876 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3877 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3878 BT_DBG("Deferring random address update");
3879 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
3883 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Pick the own-address type for an LE operation and, when needed, queue a
 * random-address update. Precedence: RPA when privacy is enabled, an
 * unresolvable private address when privacy is required without an IRK,
 * the static address when forced or no public address exists, otherwise
 * the public address.
 */
3886 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3889 struct hci_dev *hdev = req->hdev;
3892 /* If privacy is enabled use a resolvable private address. If
3893 * current RPA has expired or there is something else than
3894 * the current RPA in use, then generate a new one.
3896 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3899 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* Current RPA still valid and already programmed: nothing to do. */
3901 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3902 !bacmp(&hdev->random_addr, &hdev->rpa))
3905 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
3907 BT_ERR("%s failed to generate new RPA", hdev->name);
3911 set_random_addr(req, &hdev->rpa);
/* Re-arm the RPA rotation timer (rpa_timeout is in seconds). */
3913 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3914 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3919 /* In case of required privacy without resolvable private address,
3920 * use an unresolvable private address. This is useful for active
3921 * scanning and non-connectable advertising.
3923 if (require_privacy) {
3926 get_random_bytes(&urpa, 6);
3927 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3929 *own_addr_type = ADDR_LE_DEV_RANDOM;
3930 set_random_addr(req, &urpa);
3934 /* If forcing static address is in use or there is no public
3935 * address use the static address as random address (but skip
3936 * the HCI command if the current random address is already the
3939 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3940 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3941 *own_addr_type = ADDR_LE_DEV_RANDOM;
3942 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3943 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3944 &hdev->static_addr);
3948 /* Neither privacy nor static address is being used so use a
3951 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3956 /* Copy the Identity Address of the controller.
3958 * If the controller has a public BD_ADDR, then by default use that one.
3959 * If this is a LE only controller without a public address, default to
3960 * the static random address.
3962 * For debugging purposes it is possible to force controllers with a
3963 * public address to use the static random address instead.
3965 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3968 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3969 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3970 bacpy(bdaddr, &hdev->static_addr);
3971 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3973 bacpy(bdaddr, &hdev->bdaddr);
3974 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3978 /* Alloc HCI device */
/* Allocate and initialize a struct hci_dev: default packet types and
 * parameters, all the list heads, locks, work items, queues, sysfs and
 * discovery state. The driver fills in its callbacks before registering.
 */
3979 struct hci_dev *hci_alloc_dev(void)
3981 struct hci_dev *hdev;
3983 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
/* Mandatory baseline packet types every controller supports. */
3987 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3988 hdev->esco_type = (ESCO_HV1);
3989 hdev->link_mode = (HCI_LM_ACCEPT);
3990 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3991 hdev->io_capability = 0x03; /* No Input No Output */
3992 hdev->manufacturer = 0xffff; /* Default to internal use */
3993 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3994 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3996 hdev->sniff_max_interval = 800;
3997 hdev->sniff_min_interval = 80;
/* LE defaults, in controller units (0.625ms/1.25ms slots as per spec). */
3999 hdev->le_adv_channel_map = 0x07;
4000 hdev->le_adv_min_interval = 0x0800;
4001 hdev->le_adv_max_interval = 0x0800;
4002 hdev->le_scan_interval = 0x0060;
4003 hdev->le_scan_window = 0x0030;
4004 hdev->le_conn_min_interval = 0x0028;
4005 hdev->le_conn_max_interval = 0x0038;
4006 hdev->le_conn_latency = 0x0000;
4007 hdev->le_supv_timeout = 0x002a;
4009 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
4010 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
4011 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4012 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
4014 mutex_init(&hdev->lock);
4015 mutex_init(&hdev->req_lock);
4017 INIT_LIST_HEAD(&hdev->mgmt_pending);
4018 INIT_LIST_HEAD(&hdev->blacklist);
4019 INIT_LIST_HEAD(&hdev->whitelist);
4020 INIT_LIST_HEAD(&hdev->uuids);
4021 INIT_LIST_HEAD(&hdev->link_keys);
4022 INIT_LIST_HEAD(&hdev->long_term_keys);
4023 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
4024 INIT_LIST_HEAD(&hdev->remote_oob_data);
4025 INIT_LIST_HEAD(&hdev->le_white_list);
4026 INIT_LIST_HEAD(&hdev->le_conn_params);
4027 INIT_LIST_HEAD(&hdev->pend_le_conns);
4028 INIT_LIST_HEAD(&hdev->pend_le_reports);
4029 INIT_LIST_HEAD(&hdev->conn_hash.list);
4031 INIT_WORK(&hdev->rx_work, hci_rx_work);
4032 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4033 INIT_WORK(&hdev->tx_work, hci_tx_work);
4034 INIT_WORK(&hdev->power_on, hci_power_on);
4036 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4037 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4038 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4040 skb_queue_head_init(&hdev->rx_q);
4041 skb_queue_head_init(&hdev->cmd_q);
4042 skb_queue_head_init(&hdev->raw_q);
4044 init_waitqueue_head(&hdev->req_wait_q);
4046 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
4048 hci_init_sysfs(hdev);
4049 discovery_init(hdev);
4053 EXPORT_SYMBOL(hci_alloc_dev);
4055 /* Free HCI device */
/* Drop the device reference; the actual kfree happens in the device
 * release callback once the last reference is gone.
 */
4056 void hci_free_dev(struct hci_dev *hdev)
4058 /* will free via device release */
4059 put_device(&hdev->dev);
4061 EXPORT_SYMBOL(hci_free_dev);
4063 /* Register HCI device */
/* Register a driver-allocated controller: allocate an index, create the
 * work queues, debugfs dir, device node and rfkill switch, add it to the
 * global device list and kick off the initial power-on. Unwinds the work
 * queues and the index on the error path.
 */
4064 int hci_register_dev(struct hci_dev *hdev)
/* The driver must have provided its transport callbacks. */
4068 if (!hdev->open || !hdev->close || !hdev->send)
4071 /* Do not allow HCI_AMP devices to register at index 0,
4072 * so the index can be used as the AMP controller ID.
4074 switch (hdev->dev_type) {
4076 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4079 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4088 sprintf(hdev->name, "hci%d", id);
4091 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4093 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4094 WQ_MEM_RECLAIM, 1, hdev->name);
4095 if (!hdev->workqueue) {
4100 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4101 WQ_MEM_RECLAIM, 1, hdev->name);
4102 if (!hdev->req_workqueue) {
4103 destroy_workqueue(hdev->workqueue);
4108 if (!IS_ERR_OR_NULL(bt_debugfs))
4109 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4111 dev_set_name(&hdev->dev, "%s", hdev->name);
4113 error = device_add(&hdev->dev);
4117 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4118 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
/* rfkill registration failure is non-fatal; run without a switch. */
4121 if (rfkill_register(hdev->rfkill) < 0) {
4122 rfkill_destroy(hdev->rfkill);
4123 hdev->rfkill = NULL;
4127 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4128 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4130 set_bit(HCI_SETUP, &hdev->dev_flags);
4131 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4133 if (hdev->dev_type == HCI_BREDR) {
4134 /* Assume BR/EDR support until proven otherwise (such as
4135 * through reading supported features during init.
4137 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4140 write_lock(&hci_dev_list_lock);
4141 list_add(&hdev->list, &hci_dev_list);
4142 write_unlock(&hci_dev_list_lock);
4144 /* Devices that are marked for raw-only usage are unconfigured
4145 * and should not be included in normal operation.
4147 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4148 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4150 hci_notify(hdev, HCI_DEV_REG);
4153 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwind: free the work queues and release the index. */
4158 destroy_workqueue(hdev->workqueue);
4159 destroy_workqueue(hdev->req_workqueue);
4161 ida_simple_remove(&hci_index_ida, hdev->id);
4165 EXPORT_SYMBOL(hci_register_dev);
4167 /* Unregister HCI device */
/* Tear down a controller in the reverse order of registration: remove it
 * from the global list, close it, cancel pending work, notify mgmt,
 * unregister rfkill/sysfs/debugfs, destroy the work queues, flush all
 * stored keys and lists, and finally release the index.
 */
4168 void hci_unregister_dev(struct hci_dev *hdev)
4172 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4174 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4178 write_lock(&hci_dev_list_lock);
4179 list_del(&hdev->list);
4180 write_unlock(&hci_dev_list_lock);
4182 hci_dev_do_close(hdev);
4184 for (i = 0; i < NUM_REASSEMBLY; i++)
4185 kfree_skb(hdev->reassembly[i]);
4187 cancel_work_sync(&hdev->power_on);
/* Only tell mgmt the index is gone if it had been announced before
 * (i.e. the device already left SETUP/CONFIG).
 */
4189 if (!test_bit(HCI_INIT, &hdev->flags) &&
4190 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4191 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4193 mgmt_index_removed(hdev);
4194 hci_dev_unlock(hdev);
4197 /* mgmt_index_removed should take care of emptying the
4199 BUG_ON(!list_empty(&hdev->mgmt_pending));
4201 hci_notify(hdev, HCI_DEV_UNREG);
4204 rfkill_unregister(hdev->rfkill);
4205 rfkill_destroy(hdev->rfkill);
4208 smp_unregister(hdev);
4210 device_del(&hdev->dev);
4212 debugfs_remove_recursive(hdev->debugfs);
4214 destroy_workqueue(hdev->workqueue);
4215 destroy_workqueue(hdev->req_workqueue);
4218 hci_bdaddr_list_clear(&hdev->blacklist);
4219 hci_bdaddr_list_clear(&hdev->whitelist);
4220 hci_uuids_clear(hdev);
4221 hci_link_keys_clear(hdev);
4222 hci_smp_ltks_clear(hdev);
4223 hci_smp_irks_clear(hdev);
4224 hci_remote_oob_data_clear(hdev);
4225 hci_bdaddr_list_clear(&hdev->le_white_list);
4226 hci_conn_params_clear_all(hdev);
4227 hci_dev_unlock(hdev);
4231 ida_simple_remove(&hci_index_ida, id);
4233 EXPORT_SYMBOL(hci_unregister_dev);
4235 /* Suspend HCI device */
/* Notify registered listeners that the controller is suspending. */
4236 int hci_suspend_dev(struct hci_dev *hdev)
4238 hci_notify(hdev, HCI_DEV_SUSPEND);
4241 EXPORT_SYMBOL(hci_suspend_dev);
4243 /* Resume HCI device */
/* Notify registered listeners that the controller is resuming. */
4244 int hci_resume_dev(struct hci_dev *hdev)
4246 hci_notify(hdev, HCI_DEV_RESUME);
4249 EXPORT_SYMBOL(hci_resume_dev);
4251 /* Reset HCI device */
/* Inject a synthetic Hardware Error event (code 0x01) into the RX path so
 * the upper stack performs a full reset of the controller state.
 */
4252 int hci_reset_dev(struct hci_dev *hdev)
4254 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4255 struct sk_buff *skb;
4257 skb = bt_skb_alloc(3, GFP_ATOMIC);
4261 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4262 memcpy(skb_put(skb, 3), hw_err, 3);
4264 /* Send Hardware Error to upper stack */
4265 return hci_recv_frame(hdev, skb);
4267 EXPORT_SYMBOL(hci_reset_dev);
4269 /* Receive frame from HCI drivers */
/* Entry point for frames coming up from the driver: only accepted while
 * the device is UP or in INIT; timestamps the skb, queues it on rx_q and
 * schedules the RX work item.
 */
4270 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4272 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4273 && !test_bit(HCI_INIT, &hdev->flags))) {
4279 bt_cb(skb)->incoming = 1;
4282 __net_timestamp(skb);
4284 skb_queue_tail(&hdev->rx_q, skb);
4285 queue_work(hdev->workqueue, &hdev->rx_work);
4289 EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble a fragmented HCI packet of @type in the per-device slot
 * @index. Allocates an skb sized for the packet type on the first
 * fragment, copies as much of @data as the current header/payload expects,
 * and hands the completed frame to hci_recv_frame(). Returns via the paths
 * outside this elided view (presumably the remaining byte count or a
 * negative errno — TODO confirm against full source).
 */
4291 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4292 int count, __u8 index)
4297 struct sk_buff *skb;
4298 struct bt_skb_cb *scb;
4300 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4301 index >= NUM_REASSEMBLY)
4304 skb = hdev->reassembly[index];
/* No partial packet yet: pick max length + header size per type. */
4308 case HCI_ACLDATA_PKT:
4309 len = HCI_MAX_FRAME_SIZE;
4310 hlen = HCI_ACL_HDR_SIZE;
4313 len = HCI_MAX_EVENT_SIZE;
4314 hlen = HCI_EVENT_HDR_SIZE;
4316 case HCI_SCODATA_PKT:
4317 len = HCI_MAX_SCO_SIZE;
4318 hlen = HCI_SCO_HDR_SIZE;
4322 skb = bt_skb_alloc(len, GFP_ATOMIC);
4326 scb = (void *) skb->cb;
4328 scb->pkt_type = type;
4330 hdev->reassembly[index] = skb;
4334 scb = (void *) skb->cb;
4335 len = min_t(uint, scb->expect, count);
4337 memcpy(skb_put(skb, len), data, len);
/* Once a full header is in, learn the payload length from it and
 * sanity-check it against the skb tailroom.
 */
4346 if (skb->len == HCI_EVENT_HDR_SIZE) {
4347 struct hci_event_hdr *h = hci_event_hdr(skb);
4348 scb->expect = h->plen;
4350 if (skb_tailroom(skb) < scb->expect) {
4352 hdev->reassembly[index] = NULL;
4358 case HCI_ACLDATA_PKT:
4359 if (skb->len == HCI_ACL_HDR_SIZE) {
4360 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4361 scb->expect = __le16_to_cpu(h->dlen);
4363 if (skb_tailroom(skb) < scb->expect) {
4365 hdev->reassembly[index] = NULL;
4371 case HCI_SCODATA_PKT:
4372 if (skb->len == HCI_SCO_HDR_SIZE) {
4373 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4374 scb->expect = h->dlen;
4376 if (skb_tailroom(skb) < scb->expect) {
4378 hdev->reassembly[index] = NULL;
4385 if (scb->expect == 0) {
4386 /* Complete frame */
4388 bt_cb(skb)->pkt_type = type;
4389 hci_recv_frame(hdev, skb);
4391 hdev->reassembly[index] = NULL;
/* Slot 0 of hdev->reassembly[] is reserved for byte-stream transports. */
4399 #define STREAM_REASSEMBLY 0
/* Feed a raw byte stream (e.g. from a UART transport) into the packet
 * reassembler: the first byte of each frame is the packet-type indicator,
 * then hci_reassembly() consumes the rest; loop until @count is drained.
 */
4401 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4407 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4410 struct { char type; } *pkt;
4412 /* Start of the frame */
4419 type = bt_cb(skb)->pkt_type;
4421 rem = hci_reassembly(hdev, type, data, count,
4426 data += (count - rem);
4434 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback set (L2CAP, SCO, ...) on the global
 * hci_cb_list, protected by hci_cb_list_lock.
 */
4436 int hci_register_cb(struct hci_cb *cb)
4438 BT_DBG("%p name %s", cb, cb->name);
4440 write_lock(&hci_cb_list_lock);
4441 list_add(&cb->list, &hci_cb_list);
4442 write_unlock(&hci_cb_list_lock);
4446 EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered upper-protocol callback set.
 * Mirror of hci_register_cb(); same writer-side locking.
 */
4448 int hci_unregister_cb(struct hci_cb *cb)
4450 BT_DBG("%p name %s", cb, cb->name);
4452 write_lock(&hci_cb_list_lock);
4453 list_del(&cb->list);
4454 write_unlock(&hci_cb_list_lock);
4458 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outbound packet to the transport driver.  A timestamped
 * copy goes to the monitor channel, and to raw HCI sockets when in
 * promiscuous mode, before hdev->send() transmits the real skb.
 */
4460 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4464 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
/* Stamp now so the monitor/socket copies carry a send timestamp. */
4467 __net_timestamp(skb);
4469 /* Send copy to monitor */
4470 hci_send_to_monitor(hdev, skb);
4472 if (atomic_read(&hdev->promisc)) {
4473 /* Send copy to the sockets */
4474 hci_send_to_sock(hdev, skb);
4477 /* Get rid of skb owner, prior to sending to the driver. */
4480 err = hdev->send(hdev, skb);
4482 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
/* Initialise an HCI request: start with an empty command queue.
 * (Remaining field initialisation is on lines elided from this
 * listing — confirm against full source.)
 */
4487 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4489 skb_queue_head_init(&req->cmd_q);
/* Submit a built request to the controller: attach the completion
 * callback to the last queued command, splice all commands onto
 * hdev->cmd_q atomically under its lock and kick the command worker.
 * Empty requests (and requests whose building failed — elided branch)
 * are rejected.
 */
4494 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4496 struct hci_dev *hdev = req->hdev;
4497 struct sk_buff *skb;
4498 unsigned long flags;
4500 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4502 /* If an error occurred during request building, remove all HCI
4503 * commands queued on the HCI request queue.
4506 skb_queue_purge(&req->cmd_q);
4510 /* Do not allow empty requests */
4511 if (skb_queue_empty(&req->cmd_q))
/* Only the last command of a request carries the completion callback. */
4514 skb = skb_peek_tail(&req->cmd_q);
4515 bt_cb(skb)->req.complete = complete;
4517 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4518 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4519 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4521 queue_work(hdev->workqueue, &hdev->cmd_work);
4526 bool hci_req_pending(struct hci_dev *hdev)
4528 return (hdev->req_status == HCI_REQ_PEND);
/* Build a command skb: 3-byte HCI command header (opcode stored
 * little-endian) followed by @plen parameter bytes, tagged as
 * HCI_COMMAND_PKT.  Allocation-failure handling is on elided lines.
 */
4531 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4532 u32 plen, const void *param)
4534 int len = HCI_COMMAND_HDR_SIZE + plen;
4535 struct hci_command_hdr *hdr;
4536 struct sk_buff *skb;
4538 skb = bt_skb_alloc(len, GFP_ATOMIC);
4542 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
/* Opcode goes on the wire in little-endian byte order. */
4543 hdr->opcode = cpu_to_le16(opcode);
4547 memcpy(skb_put(skb, plen), param, plen);
4549 BT_DBG("skb len %d", skb->len);
4551 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
/* Cache the host-order opcode in the control block for later matching. */
4552 bt_cb(skb)->opcode = opcode;
4557 /* Send HCI command */
/* Queue a stand-alone HCI command: marked req.start so it forms a
 * single-command request, then the command worker is scheduled.
 */
4558 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4561 struct sk_buff *skb;
4563 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4565 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4567 BT_ERR("%s no memory for command", hdev->name);
4571 /* Stand-alone HCI commands must be flagged as
4572 * single-command requests.
4574 bt_cb(skb)->req.start = true;
4576 skb_queue_tail(&hdev->cmd_q, skb);
4577 queue_work(hdev->workqueue, &hdev->cmd_work);
4582 /* Queue a command to an asynchronous HCI request */
4583 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4584 const void *param, u8 event)
4586 struct hci_dev *hdev = req->hdev;
4587 struct sk_buff *skb;
4589 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4591 /* If an error occurred during request building, there is no point in
4592 * queueing the HCI command. We can simply return.
4597 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4599 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4600 hdev->name, opcode);
/* The first command queued on an empty request marks a request start. */
4605 if (skb_queue_empty(&req->cmd_q))
4606 bt_cb(skb)->req.start = true;
/* Optional: complete on a specific event instead of Command Complete. */
4608 bt_cb(skb)->req.event = event;
4610 skb_queue_tail(&req->cmd_q, skb);
4613 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4616 hci_req_add_ev(req, opcode, plen, param, 0);
4619 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of hdev->sent_cmd when its
 * opcode matches @opcode; the elided branches return NULL otherwise.
 */
4620 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4622 struct hci_command_hdr *hdr;
4624 if (!hdev->sent_cmd)
4627 hdr = (void *) hdev->sent_cmd->data;
/* Compare in wire (little-endian) byte order. */
4629 if (hdr->opcode != cpu_to_le16(opcode))
4632 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
/* Parameters start immediately after the 3-byte command header. */
4634 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header: the 12-bit handle packed with the 4-bit
 * packet-boundary/broadcast flags, then the payload length.
 */
4638 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4640 struct hci_acl_hdr *hdr;
4643 skb_push(skb, HCI_ACL_HDR_SIZE);
4644 skb_reset_transport_header(skb);
4645 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4646 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
/* 'len' is captured before skb_push on a line elided from this listing. */
4647 hdr->dlen = cpu_to_le16(len);
/* Queue an ACL frame (head skb plus optional frag_list fragments) on
 * the channel queue.  BR/EDR uses the connection handle, AMP the
 * hci_chan handle.  Fragments are queued atomically under the queue
 * lock so the TX worker never observes a partial frame.
 */
4650 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4651 struct sk_buff *skb, __u16 flags)
4653 struct hci_conn *conn = chan->conn;
4654 struct hci_dev *hdev = conn->hdev;
4655 struct sk_buff *list;
/* Head skb now covers only its linear part; rest rides on frag_list. */
4657 skb->len = skb_headlen(skb);
4660 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4662 switch (hdev->dev_type) {
4664 hci_add_acl_hdr(skb, conn->handle, flags);
4667 hci_add_acl_hdr(skb, chan->handle, flags);
4670 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4674 list = skb_shinfo(skb)->frag_list;
4676 /* Non fragmented */
4677 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4679 skb_queue_tail(queue, skb);
4682 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4684 skb_shinfo(skb)->frag_list = NULL;
4686 /* Queue all fragments atomically. We need to use spin_lock_bh
4687 * here because of 6LoWPAN links, as there this function is
4688 * called from softirq and using normal spin lock could cause
4691 spin_lock_bh(&queue->lock);
4693 __skb_queue_tail(queue, skb);
/* Continuation fragments drop the ACL_START boundary flag. */
4695 flags &= ~ACL_START;
4698 skb = list; list = list->next;
4700 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
/* NOTE(review): continuation fragments use conn->handle even when the
 * first fragment used chan->handle (AMP case) — confirm upstream.
 */
4701 hci_add_acl_hdr(skb, conn->handle, flags);
4703 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4705 __skb_queue_tail(queue, skb);
4708 spin_unlock_bh(&queue->lock);
/* Upper-layer entry point for outbound ACL data: queue on the channel
 * and schedule the TX worker.
 */
4712 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4714 struct hci_dev *hdev = chan->conn->hdev;
4716 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4718 hci_queue_acl(chan, &chan->data_q, skb, flags);
4720 queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend a SCO header (handle + length) and queue the frame for the
 * TX worker.  The SCO dlen field is a single byte, hence no endian
 * conversion on it.
 */
4724 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4726 struct hci_dev *hdev = conn->hdev;
4727 struct hci_sco_hdr hdr;
4729 BT_DBG("%s len %d", hdev->name, skb->len);
4731 hdr.handle = cpu_to_le16(conn->handle);
4732 hdr.dlen = skb->len;
4734 skb_push(skb, HCI_SCO_HDR_SIZE);
4735 skb_reset_transport_header(skb);
4736 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4738 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4740 skb_queue_tail(&conn->data_q, skb);
4741 queue_work(hdev->workqueue, &hdev->tx_work);
4744 /* ---- HCI TX task (outgoing data) ---- */
4746 /* HCI Connection scheduler */
/* Connection scheduler: pick the connection of @type that has queued
 * data and the fewest in-flight packets (fairness), then compute its
 * send quota from the controller's free buffer count for that link
 * type.
 */
4747 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4750 struct hci_conn_hash *h = &hdev->conn_hash;
4751 struct hci_conn *conn = NULL, *c;
4752 unsigned int num = 0, min = ~0;
4754 /* We don't have to lock device here. Connections are always
4755 * added and removed with TX task disabled. */
4759 list_for_each_entry_rcu(c, &h->list, list) {
4760 if (c->type != type || skb_queue_empty(&c->data_q))
4763 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the connection with the fewest unacknowledged packets. */
4768 if (c->sent < min) {
/* All connections of this type inspected — stop early. */
4773 if (hci_conn_num(hdev, type) == num)
4782 switch (conn->type) {
4784 cnt = hdev->acl_cnt;
4788 cnt = hdev->sco_cnt;
/* LE shares the ACL pool when no dedicated LE buffers are reported. */
4791 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4795 BT_ERR("Unknown link type");
4803 BT_DBG("conn %p quote %d", conn, *quote);
/* TX stall watchdog: the controller stopped returning buffer credits,
 * so forcibly disconnect every connection of @type that still has
 * unacknowledged packets.
 */
4807 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4809 struct hci_conn_hash *h = &hdev->conn_hash;
4812 BT_ERR("%s link tx timeout", hdev->name);
4816 /* Kill stalled connections */
4817 list_for_each_entry_rcu(c, &h->list, list) {
4818 if (c->type == type && c->sent) {
4819 BT_ERR("%s killing stalled connection %pMR",
4820 hdev->name, &c->dst);
4821 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel scheduler: among connections of @type, pick the hci_chan
 * whose head skb has the highest priority, breaking ties by fewest
 * in-flight packets, and derive the TX quota from the matching buffer
 * (or block) count.
 */
4828 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4831 struct hci_conn_hash *h = &hdev->conn_hash;
4832 struct hci_chan *chan = NULL;
4833 unsigned int num = 0, min = ~0, cur_prio = 0;
4834 struct hci_conn *conn;
4835 int cnt, q, conn_num = 0;
4837 BT_DBG("%s", hdev->name);
4841 list_for_each_entry_rcu(conn, &h->list, list) {
4842 struct hci_chan *tmp;
4844 if (conn->type != type)
4847 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4852 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4853 struct sk_buff *skb;
4855 if (skb_queue_empty(&tmp->data_q))
4858 skb = skb_peek(&tmp->data_q);
/* A lower-priority channel never preempts the current best. */
4859 if (skb->priority < cur_prio)
4862 if (skb->priority > cur_prio) {
4865 cur_prio = skb->priority;
4870 if (conn->sent < min) {
4876 if (hci_conn_num(hdev, type) == conn_num)
4885 switch (chan->conn->type) {
4887 cnt = hdev->acl_cnt;
/* AMP flow control is block-based rather than packet-based. */
4890 cnt = hdev->block_cnt;
4894 cnt = hdev->sco_cnt;
4897 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4901 BT_ERR("Unknown link type");
4906 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass after a TX round: promote the head skb of every
 * still-waiting channel of @type up to HCI_PRIO_MAX - 1 so that
 * low-priority traffic is eventually scheduled.
 */
4910 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4912 struct hci_conn_hash *h = &hdev->conn_hash;
4913 struct hci_conn *conn;
4916 BT_DBG("%s", hdev->name);
4920 list_for_each_entry_rcu(conn, &h->list, list) {
4921 struct hci_chan *chan;
4923 if (conn->type != type)
4926 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4931 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4932 struct sk_buff *skb;
4939 if (skb_queue_empty(&chan->data_q))
4942 skb = skb_peek(&chan->data_q);
/* Already at (or above) the promotion ceiling — leave untouched. */
4943 if (skb->priority >= HCI_PRIO_MAX - 1)
4946 skb->priority = HCI_PRIO_MAX - 1;
4948 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4952 if (hci_conn_num(hdev, type) == num)
/* Number of controller buffer blocks consumed by an ACL packet's
 * payload (the ACL header is excluded), rounded up.
 */
4960 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4962 /* Calculate count of blocks used by this packet */
4963 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* Fire the ACL stall watchdog when buffer credits are exhausted and
 * nothing was sent for HCI_ACL_TX_TIMEOUT.  Skipped on unconfigured
 * controllers, which may legitimately be unresponsive.
 */
4966 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4968 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4969 /* ACL tx timeout must be longer than maximum
4970 * link supervision timeout (40.9 seconds) */
4971 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4972 HCI_ACL_TX_TIMEOUT))
4973 hci_link_tx_to(hdev, ACL_LINK);
4977 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4979 unsigned int cnt = hdev->acl_cnt;
4980 struct hci_chan *chan;
4981 struct sk_buff *skb;
4984 __check_timeout(hdev, cnt);
4986 while (hdev->acl_cnt &&
4987 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
4988 u32 priority = (skb_peek(&chan->data_q))->priority;
4989 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4990 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4991 skb->len, skb->priority);
4993 /* Stop if priority has changed */
4994 if (skb->priority < priority)
4997 skb = skb_dequeue(&chan->data_q);
4999 hci_conn_enter_active_mode(chan->conn,
5000 bt_cb(skb)->force_active);
5002 hci_send_frame(hdev, skb);
5003 hdev->acl_last_tx = jiffies;
5011 if (cnt != hdev->acl_cnt)
5012 hci_prio_recalculate(hdev, ACL_LINK);
5015 static void hci_sched_acl_blk(struct hci_dev *hdev)
5017 unsigned int cnt = hdev->block_cnt;
5018 struct hci_chan *chan;
5019 struct sk_buff *skb;
5023 __check_timeout(hdev, cnt);
5025 BT_DBG("%s", hdev->name);
5027 if (hdev->dev_type == HCI_AMP)
5032 while (hdev->block_cnt > 0 &&
5033 (chan = hci_chan_sent(hdev, type, "e))) {
5034 u32 priority = (skb_peek(&chan->data_q))->priority;
5035 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5038 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5039 skb->len, skb->priority);
5041 /* Stop if priority has changed */
5042 if (skb->priority < priority)
5045 skb = skb_dequeue(&chan->data_q);
5047 blocks = __get_blocks(hdev, skb);
5048 if (blocks > hdev->block_cnt)
5051 hci_conn_enter_active_mode(chan->conn,
5052 bt_cb(skb)->force_active);
5054 hci_send_frame(hdev, skb);
5055 hdev->acl_last_tx = jiffies;
5057 hdev->block_cnt -= blocks;
5060 chan->sent += blocks;
5061 chan->conn->sent += blocks;
5065 if (cnt != hdev->block_cnt)
5066 hci_prio_recalculate(hdev, type);
/* Dispatch to the packet-based or block-based ACL scheduler depending
 * on the controller's flow-control mode; bail out early when there is
 * no link of the type this controller carries.
 */
5069 static void hci_sched_acl(struct hci_dev *hdev)
5071 BT_DBG("%s", hdev->name);
5073 /* No ACL link over BR/EDR controller */
5074 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5077 /* No AMP link over AMP controller */
5078 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5081 switch (hdev->flow_ctl_mode) {
5082 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5083 hci_sched_acl_pkt(hdev);
5086 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5087 hci_sched_acl_blk(hdev);
5093 static void hci_sched_sco(struct hci_dev *hdev)
5095 struct hci_conn *conn;
5096 struct sk_buff *skb;
5099 BT_DBG("%s", hdev->name);
5101 if (!hci_conn_num(hdev, SCO_LINK))
5104 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
5105 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5106 BT_DBG("skb %p len %d", skb, skb->len);
5107 hci_send_frame(hdev, skb);
5110 if (conn->sent == ~0)
5116 static void hci_sched_esco(struct hci_dev *hdev)
5118 struct hci_conn *conn;
5119 struct sk_buff *skb;
5122 BT_DBG("%s", hdev->name);
5124 if (!hci_conn_num(hdev, ESCO_LINK))
5127 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5129 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5130 BT_DBG("skb %p len %d", skb, skb->len);
5131 hci_send_frame(hdev, skb);
5134 if (conn->sent == ~0)
5140 static void hci_sched_le(struct hci_dev *hdev)
5142 struct hci_chan *chan;
5143 struct sk_buff *skb;
5144 int quote, cnt, tmp;
5146 BT_DBG("%s", hdev->name);
5148 if (!hci_conn_num(hdev, LE_LINK))
5151 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5152 /* LE tx timeout must be longer than maximum
5153 * link supervision timeout (40.9 seconds) */
5154 if (!hdev->le_cnt && hdev->le_pkts &&
5155 time_after(jiffies, hdev->le_last_tx + HZ * 45))
5156 hci_link_tx_to(hdev, LE_LINK);
5159 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5161 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
5162 u32 priority = (skb_peek(&chan->data_q))->priority;
5163 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5164 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5165 skb->len, skb->priority);
5167 /* Stop if priority has changed */
5168 if (skb->priority < priority)
5171 skb = skb_dequeue(&chan->data_q);
5173 hci_send_frame(hdev, skb);
5174 hdev->le_last_tx = jiffies;
5185 hdev->acl_cnt = cnt;
5188 hci_prio_recalculate(hdev, LE_LINK);
/* TX worker: run the per-link-type schedulers (skipped in user-channel
 * mode, where userspace drives the transport directly), then flush any
 * raw-queue packets straight to the driver.
 */
5191 static void hci_tx_work(struct work_struct *work)
5193 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5194 struct sk_buff *skb;
5196 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5197 hdev->sco_cnt, hdev->le_cnt);
5199 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5200 /* Schedule queues and send stuff to HCI driver */
5201 hci_sched_acl(hdev);
5202 hci_sched_sco(hdev);
5203 hci_sched_esco(hdev);
5207 /* Send next queued raw (unknown type) packet */
5208 while ((skb = skb_dequeue(&hdev->raw_q)))
5209 hci_send_frame(hdev, skb);
5212 /* ----- HCI RX task (incoming data processing) ----- */
5214 /* ACL data packet */
/* Deliver an inbound ACL data packet to L2CAP: strip the 4-byte ACL
 * header, split the handle field into handle and PB/BC flags, and look
 * up the owning connection.
 */
5215 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5217 struct hci_acl_hdr *hdr = (void *) skb->data;
5218 struct hci_conn *conn;
5219 __u16 handle, flags;
5221 skb_pull(skb, HCI_ACL_HDR_SIZE);
5223 handle = __le16_to_cpu(hdr->handle);
/* Upper 4 bits are the PB/BC flags, lower 12 the connection handle. */
5224 flags = hci_flags(handle);
5225 handle = hci_handle(handle);
5227 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5230 hdev->stat.acl_rx++;
5233 conn = hci_conn_hash_lookup_handle(hdev, handle);
5234 hci_dev_unlock(hdev);
5237 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5239 /* Send to upper protocol */
5240 l2cap_recv_acldata(conn, skb, flags);
5243 BT_ERR("%s ACL packet for unknown connection handle %d",
5244 hdev->name, handle);
5250 /* SCO data packet */
5251 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5253 struct hci_sco_hdr *hdr = (void *) skb->data;
5254 struct hci_conn *conn;
5257 skb_pull(skb, HCI_SCO_HDR_SIZE);
5259 handle = __le16_to_cpu(hdr->handle);
5261 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5263 hdev->stat.sco_rx++;
5266 conn = hci_conn_hash_lookup_handle(hdev, handle);
5267 hci_dev_unlock(hdev);
5270 /* Send to upper protocol */
5271 sco_recv_scodata(conn, skb);
5274 BT_ERR("%s SCO packet for unknown connection handle %d",
5275 hdev->name, handle);
/* A request is complete when the next command still queued starts a
 * new request (req.start set) — or when the queue is empty (elided
 * branch).
 */
5281 static bool hci_req_is_complete(struct hci_dev *hdev)
5283 struct sk_buff *skb;
5285 skb = skb_peek(&hdev->cmd_q);
5289 return bt_cb(skb)->req.start;
/* Re-queue a clone of the last sent command at the head of cmd_q.
 * Used when a controller resets spontaneously and the pending command
 * would otherwise never complete.
 */
5292 static void hci_resend_last(struct hci_dev *hdev)
5294 struct hci_command_hdr *sent;
5295 struct sk_buff *skb;
5298 if (!hdev->sent_cmd)
5301 sent = (void *) hdev->sent_cmd->data;
5302 opcode = __le16_to_cpu(sent->opcode);
/* Never resend HCI_Reset — the reset has already taken effect. */
5303 if (opcode == HCI_OP_RESET)
5306 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5310 skb_queue_head(&hdev->cmd_q, skb);
5311 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Called on Command Complete/Status: decide whether the current
 * request is finished and find its completion callback — on
 * hdev->sent_cmd for the last command of a request, or by draining the
 * request's remaining queued commands when it failed mid-way.
 */
5314 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5316 hci_req_complete_t req_complete = NULL;
5317 struct sk_buff *skb;
5318 unsigned long flags;
5320 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5322 /* If the completed command doesn't match the last one that was
5323 * sent we need to do special handling of it.
5325 if (!hci_sent_cmd_data(hdev, opcode)) {
5326 /* Some CSR based controllers generate a spontaneous
5327 * reset complete event during init and any pending
5328 * command will never be completed. In such a case we
5329 * need to resend whatever was the last sent
5332 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5333 hci_resend_last(hdev);
5338 /* If the command succeeded and there's still more commands in
5339 * this request the request is not yet complete.
5341 if (!status && !hci_req_is_complete(hdev))
5344 /* If this was the last command in a request the complete
5345 * callback would be found in hdev->sent_cmd instead of the
5346 * command queue (hdev->cmd_q).
5348 if (hdev->sent_cmd) {
5349 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5352 /* We must set the complete callback to NULL to
5353 * avoid calling the callback more than once if
5354 * this function gets called again.
5356 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5362 /* Remove all pending commands belonging to this request */
5363 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5364 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* Reached the next request's first command: push back and stop. */
5365 if (bt_cb(skb)->req.start) {
5366 __skb_queue_head(&hdev->cmd_q, skb);
5370 req_complete = bt_cb(skb)->req.complete;
5373 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5377 req_complete(hdev, status);
/* RX worker: drain rx_q, mirror each packet to the monitor channel
 * (and raw sockets in promiscuous mode), drop data packets during init
 * or in user-channel mode (elided branches), and dispatch by packet
 * type to the event/ACL/SCO handlers.
 */
5382 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5383 struct sk_buff *skb;
5385 BT_DBG("%s", hdev->name);
5387 while ((skb = skb_dequeue(&hdev->rx_q))) {
5388 /* Send copy to monitor */
5389 hci_send_to_monitor(hdev, skb);
5391 if (atomic_read(&hdev->promisc)) {
5392 /* Send copy to the sockets */
5393 hci_send_to_sock(hdev, skb);
5396 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5401 if (test_bit(HCI_INIT, &hdev->flags)) {
5402 /* Don't process data packets in this states. */
5403 switch (bt_cb(skb)->pkt_type) {
5404 case HCI_ACLDATA_PKT:
5405 case HCI_SCODATA_PKT:
5412 switch (bt_cb(skb)->pkt_type) {
5414 BT_DBG("%s Event packet", hdev->name);
5415 hci_event_packet(hdev, skb);
5418 case HCI_ACLDATA_PKT:
5419 BT_DBG("%s ACL data packet", hdev->name);
5420 hci_acldata_packet(hdev, skb);
5423 case HCI_SCODATA_PKT:
5424 BT_DBG("%s SCO data packet", hdev->name);
5425 hci_scodata_packet(hdev, skb);
/* Command worker: if a command credit is available, dequeue one
 * command, keep a clone in hdev->sent_cmd so its completion can be
 * matched, send it and (re)arm the command timeout.  On clone failure
 * the command is re-queued and the worker rescheduled.
 */
5435 static void hci_cmd_work(struct work_struct *work)
5437 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5438 struct sk_buff *skb;
5440 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5441 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5443 /* Send queued commands */
5444 if (atomic_read(&hdev->cmd_cnt)) {
5445 skb = skb_dequeue(&hdev->cmd_q);
/* Drop the previous sent_cmd clone before storing the new one. */
5449 kfree_skb(hdev->sent_cmd);
5451 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5452 if (hdev->sent_cmd) {
5453 atomic_dec(&hdev->cmd_cnt);
5454 hci_send_frame(hdev, skb);
/* During reset, stop the timer instead of re-arming it. */
5455 if (test_bit(HCI_RESET, &hdev->flags))
5456 cancel_delayed_work(&hdev->cmd_timer);
5458 schedule_delayed_work(&hdev->cmd_timer,
/* Clone failed: put the command back and retry later. */
5461 skb_queue_head(&hdev->cmd_q, skb);
5462 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Append an LE Set Scan Enable (disable) command to @req. */
5467 void hci_req_add_le_scan_disable(struct hci_request *req)
5469 struct hci_cp_le_set_scan_enable cp;
5471 memset(&cp, 0, sizeof(cp));
5472 cp.enable = LE_SCAN_DISABLE;
5473 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5476 static void add_to_white_list(struct hci_request *req,
5477 struct hci_conn_params *params)
5479 struct hci_cp_le_add_to_white_list cp;
5481 cp.bdaddr_type = params->addr_type;
5482 bacpy(&cp.bdaddr, ¶ms->addr);
5484 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5487 static u8 update_white_list(struct hci_request *req)
5489 struct hci_dev *hdev = req->hdev;
5490 struct hci_conn_params *params;
5491 struct bdaddr_list *b;
5492 uint8_t white_list_entries = 0;
5494 /* Go through the current white list programmed into the
5495 * controller one by one and check if that address is still
5496 * in the list of pending connections or list of devices to
5497 * report. If not present in either list, then queue the
5498 * command to remove it from the controller.
5500 list_for_each_entry(b, &hdev->le_white_list, list) {
5501 struct hci_cp_le_del_from_white_list cp;
5503 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5504 &b->bdaddr, b->bdaddr_type) ||
5505 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5506 &b->bdaddr, b->bdaddr_type)) {
5507 white_list_entries++;
5511 cp.bdaddr_type = b->bdaddr_type;
5512 bacpy(&cp.bdaddr, &b->bdaddr);
5514 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5518 /* Since all no longer valid white list entries have been
5519 * removed, walk through the list of pending connections
5520 * and ensure that any new device gets programmed into
5523 * If the list of the devices is larger than the list of
5524 * available white list entries in the controller, then
5525 * just abort and return filer policy value to not use the
5528 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5529 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5530 ¶ms->addr, params->addr_type))
5533 if (white_list_entries >= hdev->le_white_list_size) {
5534 /* Select filter policy to accept all advertising */
5538 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
5539 params->addr_type)) {
5540 /* White list can not be used with RPAs */
5544 white_list_entries++;
5545 add_to_white_list(req, params);
5548 /* After adding all new pending connections, walk through
5549 * the list of pending reports and also add these to the
5550 * white list if there is still space.
5552 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5553 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5554 ¶ms->addr, params->addr_type))
5557 if (white_list_entries >= hdev->le_white_list_size) {
5558 /* Select filter policy to accept all advertising */
5562 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
5563 params->addr_type)) {
5564 /* White list can not be used with RPAs */
5568 white_list_entries++;
5569 add_to_white_list(req, params);
5572 /* Select filter policy to use white list */
5576 void hci_req_add_le_passive_scan(struct hci_request *req)
5578 struct hci_cp_le_set_scan_param param_cp;
5579 struct hci_cp_le_set_scan_enable enable_cp;
5580 struct hci_dev *hdev = req->hdev;
5584 /* Set require_privacy to false since no SCAN_REQ are send
5585 * during passive scanning. Not using an unresolvable address
5586 * here is important so that peer devices using direct
5587 * advertising with our address will be correctly reported
5588 * by the controller.
5590 if (hci_update_random_address(req, false, &own_addr_type))
5593 /* Adding or removing entries from the white list must
5594 * happen before enabling scanning. The controller does
5595 * not allow white list modification while scanning.
5597 filter_policy = update_white_list(req);
5599 memset(¶m_cp, 0, sizeof(param_cp));
5600 param_cp.type = LE_SCAN_PASSIVE;
5601 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5602 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5603 param_cp.own_address_type = own_addr_type;
5604 param_cp.filter_policy = filter_policy;
5605 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5608 memset(&enable_cp, 0, sizeof(enable_cp));
5609 enable_cp.enable = LE_SCAN_ENABLE;
5610 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5611 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Completion hook for background-scan requests: only logs failures
 * (the surrounding 'if (status)' check is on an elided line).
 */
5615 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5618 BT_DBG("HCI request failed to update background scanning: "
5619 "status 0x%2.2x", status);
5622 /* This function controls the background scanning based on hdev->pend_le_conns
5623 * list. If there are pending LE connection we start the background scanning,
5624 * otherwise we stop it.
5626 * This function requires the caller holds hdev->lock.
/* Start or stop LE background (passive) scanning based on the pending
 * LE connection and report lists.  Caller must hold hdev->lock.
 * No-op while the device is down, initialising, in setup/config,
 * auto-powering off, unregistering, with LE disabled, or while
 * discovery is active.
 */
5628 void hci_update_background_scan(struct hci_dev *hdev)
5630 struct hci_request req;
5631 struct hci_conn *conn;
5634 if (!test_bit(HCI_UP, &hdev->flags) ||
5635 test_bit(HCI_INIT, &hdev->flags) ||
5636 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5637 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5638 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5639 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5642 /* No point in doing scanning if LE support hasn't been enabled */
5643 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5646 /* If discovery is active don't interfere with it */
5647 if (hdev->discovery.state != DISCOVERY_STOPPED)
5650 hci_req_init(&req, hdev);
5652 if (list_empty(&hdev->pend_le_conns) &&
5653 list_empty(&hdev->pend_le_reports)) {
5654 /* If there is no pending LE connections or devices
5655 * to be scanned for, we should stop the background
5659 /* If controller is not scanning we are done. */
5660 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5663 hci_req_add_le_scan_disable(&req);
5665 BT_DBG("%s stopping background scanning", hdev->name);
5667 /* If there is at least one pending LE connection, we should
5668 * keep the background scan running.
5671 /* If controller is connecting, we should not start scanning
5672 * since some controllers are not able to scan and connect at
5675 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5679 /* If controller is currently scanning, we stop it to ensure we
5680 * don't miss any advertising (due to duplicates filter).
5682 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5683 hci_req_add_le_scan_disable(&req);
/* Restart scanning with a freshly programmed white list. */
5685 hci_req_add_le_passive_scan(&req);
5687 BT_DBG("%s starting background scanning", hdev->name);
5690 err = hci_req_run(&req, update_background_scan_complete);
5692 BT_ERR("Failed to run HCI request: err %d", err);
/* Scan the BR/EDR whitelist for an entry without an active ACL
 * connection; the (elided) returns yield true in that case, meaning
 * page scan must stay enabled so the device can reconnect.
 */
5695 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5697 struct bdaddr_list *b;
5699 list_for_each_entry(b, &hdev->whitelist, list) {
5700 struct hci_conn *conn;
5702 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5706 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5713 void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5717 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5720 if (!hdev_is_powered(hdev))
5723 if (mgmt_powering_down(hdev))
5726 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
5727 disconnected_whitelist_entries(hdev))
5730 scan = SCAN_DISABLED;
5732 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5735 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5736 scan |= SCAN_INQUIRY;
5739 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5741 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);