/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
/* Deferred work handlers, defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
55 /* ---- HCI notifications ---- */
/* Notify HCI sockets of a device event (e.g. register/unregister). */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
62 /* ---- HCI debugfs entries ---- */
64 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
65 size_t count, loff_t *ppos)
67 struct hci_dev *hdev = file->private_data;
70 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
73 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
76 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
77 size_t count, loff_t *ppos)
79 struct hci_dev *hdev = file->private_data;
82 size_t buf_size = min(count, (sizeof(buf)-1));
86 if (!test_bit(HCI_UP, &hdev->flags))
89 if (copy_from_user(buf, user_buf, buf_size))
93 if (strtobool(buf, &enable))
96 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
101 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
104 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
106 hci_req_unlock(hdev);
111 err = -bt_to_errno(skb->data[0]);
117 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
122 static const struct file_operations dut_mode_fops = {
124 .read = dut_mode_read,
125 .write = dut_mode_write,
126 .llseek = default_llseek,
129 static int features_show(struct seq_file *f, void *ptr)
131 struct hci_dev *hdev = f->private;
135 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
136 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
137 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
138 hdev->features[p][0], hdev->features[p][1],
139 hdev->features[p][2], hdev->features[p][3],
140 hdev->features[p][4], hdev->features[p][5],
141 hdev->features[p][6], hdev->features[p][7]);
143 if (lmp_le_capable(hdev))
144 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
145 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
146 hdev->le_features[0], hdev->le_features[1],
147 hdev->le_features[2], hdev->le_features[3],
148 hdev->le_features[4], hdev->le_features[5],
149 hdev->le_features[6], hdev->le_features[7]);
150 hci_dev_unlock(hdev);
155 static int features_open(struct inode *inode, struct file *file)
157 return single_open(file, features_show, inode->i_private);
160 static const struct file_operations features_fops = {
161 .open = features_open,
164 .release = single_release,
167 static int blacklist_show(struct seq_file *f, void *p)
169 struct hci_dev *hdev = f->private;
170 struct bdaddr_list *b;
173 list_for_each_entry(b, &hdev->blacklist, list)
174 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
175 hci_dev_unlock(hdev);
180 static int blacklist_open(struct inode *inode, struct file *file)
182 return single_open(file, blacklist_show, inode->i_private);
185 static const struct file_operations blacklist_fops = {
186 .open = blacklist_open,
189 .release = single_release,
192 static int uuids_show(struct seq_file *f, void *p)
194 struct hci_dev *hdev = f->private;
195 struct bt_uuid *uuid;
198 list_for_each_entry(uuid, &hdev->uuids, list) {
201 /* The Bluetooth UUID values are stored in big endian,
202 * but with reversed byte order. So convert them into
203 * the right order for the %pUb modifier.
205 for (i = 0; i < 16; i++)
206 val[i] = uuid->uuid[15 - i];
208 seq_printf(f, "%pUb\n", val);
210 hci_dev_unlock(hdev);
215 static int uuids_open(struct inode *inode, struct file *file)
217 return single_open(file, uuids_show, inode->i_private);
220 static const struct file_operations uuids_fops = {
224 .release = single_release,
227 static int inquiry_cache_show(struct seq_file *f, void *p)
229 struct hci_dev *hdev = f->private;
230 struct discovery_state *cache = &hdev->discovery;
231 struct inquiry_entry *e;
235 list_for_each_entry(e, &cache->all, all) {
236 struct inquiry_data *data = &e->data;
237 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
239 data->pscan_rep_mode, data->pscan_period_mode,
240 data->pscan_mode, data->dev_class[2],
241 data->dev_class[1], data->dev_class[0],
242 __le16_to_cpu(data->clock_offset),
243 data->rssi, data->ssp_mode, e->timestamp);
246 hci_dev_unlock(hdev);
251 static int inquiry_cache_open(struct inode *inode, struct file *file)
253 return single_open(file, inquiry_cache_show, inode->i_private);
256 static const struct file_operations inquiry_cache_fops = {
257 .open = inquiry_cache_open,
260 .release = single_release,
263 static int link_keys_show(struct seq_file *f, void *ptr)
265 struct hci_dev *hdev = f->private;
266 struct list_head *p, *n;
269 list_for_each_safe(p, n, &hdev->link_keys) {
270 struct link_key *key = list_entry(p, struct link_key, list);
271 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
272 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
274 hci_dev_unlock(hdev);
279 static int link_keys_open(struct inode *inode, struct file *file)
281 return single_open(file, link_keys_show, inode->i_private);
284 static const struct file_operations link_keys_fops = {
285 .open = link_keys_open,
288 .release = single_release,
291 static int dev_class_show(struct seq_file *f, void *ptr)
293 struct hci_dev *hdev = f->private;
296 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
297 hdev->dev_class[1], hdev->dev_class[0]);
298 hci_dev_unlock(hdev);
303 static int dev_class_open(struct inode *inode, struct file *file)
305 return single_open(file, dev_class_show, inode->i_private);
308 static const struct file_operations dev_class_fops = {
309 .open = dev_class_open,
312 .release = single_release,
315 static int voice_setting_get(void *data, u64 *val)
317 struct hci_dev *hdev = data;
320 *val = hdev->voice_setting;
321 hci_dev_unlock(hdev);
326 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
327 NULL, "0x%4.4llx\n");
329 static int auto_accept_delay_set(void *data, u64 val)
331 struct hci_dev *hdev = data;
334 hdev->auto_accept_delay = val;
335 hci_dev_unlock(hdev);
340 static int auto_accept_delay_get(void *data, u64 *val)
342 struct hci_dev *hdev = data;
345 *val = hdev->auto_accept_delay;
346 hci_dev_unlock(hdev);
351 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
352 auto_accept_delay_set, "%llu\n");
354 static int ssp_debug_mode_set(void *data, u64 val)
356 struct hci_dev *hdev = data;
361 if (val != 0 && val != 1)
364 if (!test_bit(HCI_UP, &hdev->flags))
369 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
370 &mode, HCI_CMD_TIMEOUT);
371 hci_req_unlock(hdev);
376 err = -bt_to_errno(skb->data[0]);
383 hdev->ssp_debug_mode = val;
384 hci_dev_unlock(hdev);
389 static int ssp_debug_mode_get(void *data, u64 *val)
391 struct hci_dev *hdev = data;
394 *val = hdev->ssp_debug_mode;
395 hci_dev_unlock(hdev);
400 DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
401 ssp_debug_mode_set, "%llu\n");
403 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
404 size_t count, loff_t *ppos)
406 struct hci_dev *hdev = file->private_data;
409 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
412 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
415 static ssize_t force_sc_support_write(struct file *file,
416 const char __user *user_buf,
417 size_t count, loff_t *ppos)
419 struct hci_dev *hdev = file->private_data;
421 size_t buf_size = min(count, (sizeof(buf)-1));
424 if (test_bit(HCI_UP, &hdev->flags))
427 if (copy_from_user(buf, user_buf, buf_size))
430 buf[buf_size] = '\0';
431 if (strtobool(buf, &enable))
434 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
437 change_bit(HCI_FORCE_SC, &hdev->dev_flags);
442 static const struct file_operations force_sc_support_fops = {
444 .read = force_sc_support_read,
445 .write = force_sc_support_write,
446 .llseek = default_llseek,
449 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
450 size_t count, loff_t *ppos)
452 struct hci_dev *hdev = file->private_data;
455 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
458 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
461 static const struct file_operations sc_only_mode_fops = {
463 .read = sc_only_mode_read,
464 .llseek = default_llseek,
467 static int idle_timeout_set(void *data, u64 val)
469 struct hci_dev *hdev = data;
471 if (val != 0 && (val < 500 || val > 3600000))
475 hdev->idle_timeout = val;
476 hci_dev_unlock(hdev);
481 static int idle_timeout_get(void *data, u64 *val)
483 struct hci_dev *hdev = data;
486 *val = hdev->idle_timeout;
487 hci_dev_unlock(hdev);
492 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
493 idle_timeout_set, "%llu\n");
495 static int rpa_timeout_set(void *data, u64 val)
497 struct hci_dev *hdev = data;
499 /* Require the RPA timeout to be at least 30 seconds and at most
502 if (val < 30 || val > (60 * 60 * 24))
506 hdev->rpa_timeout = val;
507 hci_dev_unlock(hdev);
512 static int rpa_timeout_get(void *data, u64 *val)
514 struct hci_dev *hdev = data;
517 *val = hdev->rpa_timeout;
518 hci_dev_unlock(hdev);
523 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
524 rpa_timeout_set, "%llu\n");
526 static int sniff_min_interval_set(void *data, u64 val)
528 struct hci_dev *hdev = data;
530 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
534 hdev->sniff_min_interval = val;
535 hci_dev_unlock(hdev);
540 static int sniff_min_interval_get(void *data, u64 *val)
542 struct hci_dev *hdev = data;
545 *val = hdev->sniff_min_interval;
546 hci_dev_unlock(hdev);
551 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
552 sniff_min_interval_set, "%llu\n");
554 static int sniff_max_interval_set(void *data, u64 val)
556 struct hci_dev *hdev = data;
558 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
562 hdev->sniff_max_interval = val;
563 hci_dev_unlock(hdev);
568 static int sniff_max_interval_get(void *data, u64 *val)
570 struct hci_dev *hdev = data;
573 *val = hdev->sniff_max_interval;
574 hci_dev_unlock(hdev);
579 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
580 sniff_max_interval_set, "%llu\n");
582 static int identity_show(struct seq_file *f, void *p)
584 struct hci_dev *hdev = f->private;
590 hci_copy_identity_address(hdev, &addr, &addr_type);
592 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
593 16, hdev->irk, &hdev->rpa);
595 hci_dev_unlock(hdev);
600 static int identity_open(struct inode *inode, struct file *file)
602 return single_open(file, identity_show, inode->i_private);
605 static const struct file_operations identity_fops = {
606 .open = identity_open,
609 .release = single_release,
612 static int random_address_show(struct seq_file *f, void *p)
614 struct hci_dev *hdev = f->private;
617 seq_printf(f, "%pMR\n", &hdev->random_addr);
618 hci_dev_unlock(hdev);
623 static int random_address_open(struct inode *inode, struct file *file)
625 return single_open(file, random_address_show, inode->i_private);
628 static const struct file_operations random_address_fops = {
629 .open = random_address_open,
632 .release = single_release,
635 static int static_address_show(struct seq_file *f, void *p)
637 struct hci_dev *hdev = f->private;
640 seq_printf(f, "%pMR\n", &hdev->static_addr);
641 hci_dev_unlock(hdev);
646 static int static_address_open(struct inode *inode, struct file *file)
648 return single_open(file, static_address_show, inode->i_private);
651 static const struct file_operations static_address_fops = {
652 .open = static_address_open,
655 .release = single_release,
658 static ssize_t force_static_address_read(struct file *file,
659 char __user *user_buf,
660 size_t count, loff_t *ppos)
662 struct hci_dev *hdev = file->private_data;
665 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
668 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
671 static ssize_t force_static_address_write(struct file *file,
672 const char __user *user_buf,
673 size_t count, loff_t *ppos)
675 struct hci_dev *hdev = file->private_data;
677 size_t buf_size = min(count, (sizeof(buf)-1));
680 if (test_bit(HCI_UP, &hdev->flags))
683 if (copy_from_user(buf, user_buf, buf_size))
686 buf[buf_size] = '\0';
687 if (strtobool(buf, &enable))
690 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
693 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);
698 static const struct file_operations force_static_address_fops = {
700 .read = force_static_address_read,
701 .write = force_static_address_write,
702 .llseek = default_llseek,
705 static int white_list_show(struct seq_file *f, void *ptr)
707 struct hci_dev *hdev = f->private;
708 struct bdaddr_list *b;
711 list_for_each_entry(b, &hdev->le_white_list, list)
712 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
713 hci_dev_unlock(hdev);
718 static int white_list_open(struct inode *inode, struct file *file)
720 return single_open(file, white_list_show, inode->i_private);
723 static const struct file_operations white_list_fops = {
724 .open = white_list_open,
727 .release = single_release,
730 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
732 struct hci_dev *hdev = f->private;
733 struct list_head *p, *n;
736 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
737 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
738 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
739 &irk->bdaddr, irk->addr_type,
740 16, irk->val, &irk->rpa);
742 hci_dev_unlock(hdev);
747 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
749 return single_open(file, identity_resolving_keys_show,
753 static const struct file_operations identity_resolving_keys_fops = {
754 .open = identity_resolving_keys_open,
757 .release = single_release,
760 static int long_term_keys_show(struct seq_file *f, void *ptr)
762 struct hci_dev *hdev = f->private;
763 struct list_head *p, *n;
766 list_for_each_safe(p, n, &hdev->long_term_keys) {
767 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
768 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
769 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
770 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
771 __le64_to_cpu(ltk->rand), 16, ltk->val);
773 hci_dev_unlock(hdev);
778 static int long_term_keys_open(struct inode *inode, struct file *file)
780 return single_open(file, long_term_keys_show, inode->i_private);
783 static const struct file_operations long_term_keys_fops = {
784 .open = long_term_keys_open,
787 .release = single_release,
790 static int conn_min_interval_set(void *data, u64 val)
792 struct hci_dev *hdev = data;
794 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
798 hdev->le_conn_min_interval = val;
799 hci_dev_unlock(hdev);
804 static int conn_min_interval_get(void *data, u64 *val)
806 struct hci_dev *hdev = data;
809 *val = hdev->le_conn_min_interval;
810 hci_dev_unlock(hdev);
815 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
816 conn_min_interval_set, "%llu\n");
818 static int conn_max_interval_set(void *data, u64 val)
820 struct hci_dev *hdev = data;
822 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
826 hdev->le_conn_max_interval = val;
827 hci_dev_unlock(hdev);
832 static int conn_max_interval_get(void *data, u64 *val)
834 struct hci_dev *hdev = data;
837 *val = hdev->le_conn_max_interval;
838 hci_dev_unlock(hdev);
843 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
844 conn_max_interval_set, "%llu\n");
846 static int adv_channel_map_set(void *data, u64 val)
848 struct hci_dev *hdev = data;
850 if (val < 0x01 || val > 0x07)
854 hdev->le_adv_channel_map = val;
855 hci_dev_unlock(hdev);
860 static int adv_channel_map_get(void *data, u64 *val)
862 struct hci_dev *hdev = data;
865 *val = hdev->le_adv_channel_map;
866 hci_dev_unlock(hdev);
871 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
872 adv_channel_map_set, "%llu\n");
874 static ssize_t lowpan_read(struct file *file, char __user *user_buf,
875 size_t count, loff_t *ppos)
877 struct hci_dev *hdev = file->private_data;
880 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
883 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
886 static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
887 size_t count, loff_t *position)
889 struct hci_dev *hdev = fp->private_data;
892 size_t buf_size = min(count, (sizeof(buf)-1));
894 if (copy_from_user(buf, user_buffer, buf_size))
897 buf[buf_size] = '\0';
899 if (strtobool(buf, &enable) < 0)
902 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
905 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
910 static const struct file_operations lowpan_debugfs_fops = {
913 .write = lowpan_write,
914 .llseek = default_llseek,
917 static int le_auto_conn_show(struct seq_file *sf, void *ptr)
919 struct hci_dev *hdev = sf->private;
920 struct hci_conn_params *p;
924 list_for_each_entry(p, &hdev->le_conn_params, list) {
925 seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
929 hci_dev_unlock(hdev);
934 static int le_auto_conn_open(struct inode *inode, struct file *file)
936 return single_open(file, le_auto_conn_show, inode->i_private);
939 static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
940 size_t count, loff_t *offset)
942 struct seq_file *sf = file->private_data;
943 struct hci_dev *hdev = sf->private;
951 /* Don't allow partial write */
958 buf = memdup_user(data, count);
962 if (memcmp(buf, "add", 3) == 0) {
963 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
964 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
965 &addr.b[1], &addr.b[0], &addr_type,
974 err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
975 hdev->le_conn_min_interval,
976 hdev->le_conn_max_interval);
977 hci_dev_unlock(hdev);
981 } else if (memcmp(buf, "del", 3) == 0) {
982 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
983 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
984 &addr.b[1], &addr.b[0], &addr_type);
992 hci_conn_params_del(hdev, &addr, addr_type);
993 hci_dev_unlock(hdev);
994 } else if (memcmp(buf, "clr", 3) == 0) {
996 hci_conn_params_clear(hdev);
997 hci_pend_le_conns_clear(hdev);
998 hci_update_background_scan(hdev);
999 hci_dev_unlock(hdev);
1013 static const struct file_operations le_auto_conn_fops = {
1014 .open = le_auto_conn_open,
1016 .write = le_auto_conn_write,
1017 .llseek = seq_lseek,
1018 .release = single_release,
1021 /* ---- HCI requests ---- */
1023 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1025 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1027 if (hdev->req_status == HCI_REQ_PEND) {
1028 hdev->req_result = result;
1029 hdev->req_status = HCI_REQ_DONE;
1030 wake_up_interruptible(&hdev->req_wait_q);
1034 static void hci_req_cancel(struct hci_dev *hdev, int err)
1036 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1038 if (hdev->req_status == HCI_REQ_PEND) {
1039 hdev->req_result = err;
1040 hdev->req_status = HCI_REQ_CANCELED;
1041 wake_up_interruptible(&hdev->req_wait_q);
1045 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1048 struct hci_ev_cmd_complete *ev;
1049 struct hci_event_hdr *hdr;
1050 struct sk_buff *skb;
1054 skb = hdev->recv_evt;
1055 hdev->recv_evt = NULL;
1057 hci_dev_unlock(hdev);
1060 return ERR_PTR(-ENODATA);
1062 if (skb->len < sizeof(*hdr)) {
1063 BT_ERR("Too short HCI event");
1067 hdr = (void *) skb->data;
1068 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1071 if (hdr->evt != event)
1076 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1077 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1081 if (skb->len < sizeof(*ev)) {
1082 BT_ERR("Too short cmd_complete event");
1086 ev = (void *) skb->data;
1087 skb_pull(skb, sizeof(*ev));
1089 if (opcode == __le16_to_cpu(ev->opcode))
1092 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1093 __le16_to_cpu(ev->opcode));
1097 return ERR_PTR(-ENODATA);
1100 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1101 const void *param, u8 event, u32 timeout)
1103 DECLARE_WAITQUEUE(wait, current);
1104 struct hci_request req;
1107 BT_DBG("%s", hdev->name);
1109 hci_req_init(&req, hdev);
1111 hci_req_add_ev(&req, opcode, plen, param, event);
1113 hdev->req_status = HCI_REQ_PEND;
1115 err = hci_req_run(&req, hci_req_sync_complete);
1117 return ERR_PTR(err);
1119 add_wait_queue(&hdev->req_wait_q, &wait);
1120 set_current_state(TASK_INTERRUPTIBLE);
1122 schedule_timeout(timeout);
1124 remove_wait_queue(&hdev->req_wait_q, &wait);
1126 if (signal_pending(current))
1127 return ERR_PTR(-EINTR);
1129 switch (hdev->req_status) {
1131 err = -bt_to_errno(hdev->req_result);
1134 case HCI_REQ_CANCELED:
1135 err = -hdev->req_result;
1143 hdev->req_status = hdev->req_result = 0;
1145 BT_DBG("%s end: err %d", hdev->name, err);
1148 return ERR_PTR(err);
1150 return hci_get_cmd_complete(hdev, opcode, event);
1152 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1154 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1155 const void *param, u32 timeout)
1157 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1159 EXPORT_SYMBOL(__hci_cmd_sync);
1161 /* Execute request and wait for completion. */
1162 static int __hci_req_sync(struct hci_dev *hdev,
1163 void (*func)(struct hci_request *req,
1165 unsigned long opt, __u32 timeout)
1167 struct hci_request req;
1168 DECLARE_WAITQUEUE(wait, current);
1171 BT_DBG("%s start", hdev->name);
1173 hci_req_init(&req, hdev);
1175 hdev->req_status = HCI_REQ_PEND;
1179 err = hci_req_run(&req, hci_req_sync_complete);
1181 hdev->req_status = 0;
1183 /* ENODATA means the HCI request command queue is empty.
1184 * This can happen when a request with conditionals doesn't
1185 * trigger any commands to be sent. This is normal behavior
1186 * and should not trigger an error return.
1188 if (err == -ENODATA)
1194 add_wait_queue(&hdev->req_wait_q, &wait);
1195 set_current_state(TASK_INTERRUPTIBLE);
1197 schedule_timeout(timeout);
1199 remove_wait_queue(&hdev->req_wait_q, &wait);
1201 if (signal_pending(current))
1204 switch (hdev->req_status) {
1206 err = -bt_to_errno(hdev->req_result);
1209 case HCI_REQ_CANCELED:
1210 err = -hdev->req_result;
1218 hdev->req_status = hdev->req_result = 0;
1220 BT_DBG("%s end: err %d", hdev->name, err);
1225 static int hci_req_sync(struct hci_dev *hdev,
1226 void (*req)(struct hci_request *req,
1228 unsigned long opt, __u32 timeout)
1232 if (!test_bit(HCI_UP, &hdev->flags))
1235 /* Serialize all requests */
1237 ret = __hci_req_sync(hdev, req, opt, timeout);
1238 hci_req_unlock(hdev);
1243 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1245 BT_DBG("%s %ld", req->hdev->name, opt);
1248 set_bit(HCI_RESET, &req->hdev->flags);
1249 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1252 static void bredr_init(struct hci_request *req)
1254 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1256 /* Read Local Supported Features */
1257 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1259 /* Read Local Version */
1260 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1262 /* Read BD Address */
1263 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1266 static void amp_init(struct hci_request *req)
1268 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1270 /* Read Local Version */
1271 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1273 /* Read Local Supported Commands */
1274 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1276 /* Read Local Supported Features */
1277 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1279 /* Read Local AMP Info */
1280 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1282 /* Read Data Blk size */
1283 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1285 /* Read Flow Control Mode */
1286 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1288 /* Read Location Data */
1289 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1292 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1294 struct hci_dev *hdev = req->hdev;
1296 BT_DBG("%s %ld", hdev->name, opt);
1299 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1300 hci_reset_req(req, 0);
1302 switch (hdev->dev_type) {
1312 BT_ERR("Unknown device type %d", hdev->dev_type);
1317 static void bredr_setup(struct hci_request *req)
1319 struct hci_dev *hdev = req->hdev;
1324 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1325 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1327 /* Read Class of Device */
1328 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1330 /* Read Local Name */
1331 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1333 /* Read Voice Setting */
1334 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1336 /* Read Number of Supported IAC */
1337 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1339 /* Read Current IAC LAP */
1340 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1342 /* Clear Event Filters */
1343 flt_type = HCI_FLT_CLEAR_ALL;
1344 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1346 /* Connection accept timeout ~20 secs */
1347 param = cpu_to_le16(0x7d00);
1348 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1350 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1351 * but it does not support page scan related HCI commands.
1353 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1354 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1355 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1359 static void le_setup(struct hci_request *req)
1361 struct hci_dev *hdev = req->hdev;
1363 /* Read LE Buffer Size */
1364 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1366 /* Read LE Local Supported Features */
1367 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1369 /* Read LE Supported States */
1370 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1372 /* Read LE Advertising Channel TX Power */
1373 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1375 /* Read LE White List Size */
1376 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1378 /* Clear LE White List */
1379 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1381 /* LE-only controllers have LE implicitly enabled */
1382 if (!lmp_bredr_capable(hdev))
1383 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1386 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1388 if (lmp_ext_inq_capable(hdev))
1391 if (lmp_inq_rssi_capable(hdev))
1394 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1395 hdev->lmp_subver == 0x0757)
1398 if (hdev->manufacturer == 15) {
1399 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1401 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1403 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1407 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1408 hdev->lmp_subver == 0x1805)
1414 static void hci_setup_inquiry_mode(struct hci_request *req)
1418 mode = hci_get_inquiry_mode(req->hdev);
1420 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1423 static void hci_setup_event_mask(struct hci_request *req)
1425 struct hci_dev *hdev = req->hdev;
1427 /* The second byte is 0xff instead of 0x9f (two reserved bits
1428 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1429 * command otherwise.
1431 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1433 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1434 * any event mask for pre 1.2 devices.
1436 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1439 if (lmp_bredr_capable(hdev)) {
1440 events[4] |= 0x01; /* Flow Specification Complete */
1441 events[4] |= 0x02; /* Inquiry Result with RSSI */
1442 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1443 events[5] |= 0x08; /* Synchronous Connection Complete */
1444 events[5] |= 0x10; /* Synchronous Connection Changed */
1446 /* Use a different default for LE-only devices */
1447 memset(events, 0, sizeof(events));
1448 events[0] |= 0x10; /* Disconnection Complete */
1449 events[0] |= 0x80; /* Encryption Change */
1450 events[1] |= 0x08; /* Read Remote Version Information Complete */
1451 events[1] |= 0x20; /* Command Complete */
1452 events[1] |= 0x40; /* Command Status */
1453 events[1] |= 0x80; /* Hardware Error */
1454 events[2] |= 0x04; /* Number of Completed Packets */
1455 events[3] |= 0x02; /* Data Buffer Overflow */
1456 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1459 if (lmp_inq_rssi_capable(hdev))
1460 events[4] |= 0x02; /* Inquiry Result with RSSI */
1462 if (lmp_sniffsubr_capable(hdev))
1463 events[5] |= 0x20; /* Sniff Subrating */
1465 if (lmp_pause_enc_capable(hdev))
1466 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1468 if (lmp_ext_inq_capable(hdev))
1469 events[5] |= 0x40; /* Extended Inquiry Result */
1471 if (lmp_no_flush_capable(hdev))
1472 events[7] |= 0x01; /* Enhanced Flush Complete */
1474 if (lmp_lsto_capable(hdev))
1475 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1477 if (lmp_ssp_capable(hdev)) {
1478 events[6] |= 0x01; /* IO Capability Request */
1479 events[6] |= 0x02; /* IO Capability Response */
1480 events[6] |= 0x04; /* User Confirmation Request */
1481 events[6] |= 0x08; /* User Passkey Request */
1482 events[6] |= 0x10; /* Remote OOB Data Request */
1483 events[6] |= 0x20; /* Simple Pairing Complete */
1484 events[7] |= 0x04; /* User Passkey Notification */
1485 events[7] |= 0x08; /* Keypress Notification */
1486 events[7] |= 0x10; /* Remote Host Supported
1487 * Features Notification
1491 if (lmp_le_capable(hdev))
1492 events[7] |= 0x20; /* LE Meta-Event */
1494 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1496 if (lmp_le_capable(hdev)) {
1497 memset(events, 0, sizeof(events));
1499 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1500 sizeof(events), events);
/* Stage-2 controller init: BR/EDR enable flag, event mask, local-commands
 * read, SSP/EIR setup, inquiry mode, extended features and link security.
 * NOTE(review): this dump is missing interleaved source lines (braces,
 * local declarations such as the SSP "mode" byte); code lines below are
 * preserved byte-for-byte.
 */
1504 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1506 struct hci_dev *hdev = req->hdev;
/* BR/EDR-capable vs LE-only split — the taken-branch bodies are partially
 * missing from this dump; TODO confirm against upstream hci_core.c. */
1508 if (lmp_bredr_capable(hdev))
1511 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1513 if (lmp_le_capable(hdev))
1516 hci_setup_event_mask(req);
1518 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1519 * local supported commands HCI command.
1521 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1522 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1524 if (lmp_ssp_capable(hdev)) {
1525 /* When SSP is available, then the host features page
1526 * should also be available as well. However some
1527 * controllers list the max_page as 0 as long as SSP
1528 * has not been enabled. To achieve proper debugging
1529 * output, force the minimum max_page to 1 at least.
1531 hdev->max_page = 0x01;
1533 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1535 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1536 sizeof(mode), &mode);
/* else-branch: SSP disabled — clear the stored EIR and write an empty one */
1538 struct hci_cp_write_eir cp;
1540 memset(hdev->eir, 0, sizeof(hdev->eir));
1541 memset(&cp, 0, sizeof(cp));
1543 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1547 if (lmp_inq_rssi_capable(hdev))
1548 hci_setup_inquiry_mode(req);
1550 if (lmp_inq_tx_pwr_capable(hdev))
1551 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1553 if (lmp_ext_feat_capable(hdev)) {
1554 struct hci_cp_read_local_ext_features cp;
1557 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1561 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1563 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Build the default link-policy bitmask from the controller's LMP feature
 * bits (role switch / hold / sniff / park) and queue a Write Default Link
 * Policy command on the request.
 */
1568 static void hci_setup_link_policy(struct hci_request *req)
1570 struct hci_dev *hdev = req->hdev;
1571 struct hci_cp_write_def_link_policy cp;
1572 u16 link_policy = 0;
1574 if (lmp_rswitch_capable(hdev))
1575 link_policy |= HCI_LP_RSWITCH;
1576 if (lmp_hold_capable(hdev))
1577 link_policy |= HCI_LP_HOLD;
1578 if (lmp_sniff_capable(hdev))
1579 link_policy |= HCI_LP_SNIFF;
1580 if (lmp_park_capable(hdev))
1581 link_policy |= HCI_LP_PARK;
/* Command parameter is little-endian on the wire */
1583 cp.policy = cpu_to_le16(link_policy);
1584 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Queue Write LE Host Supported when the host-side LE enable state differs
 * from what the controller currently reports. LE-only controllers are
 * skipped (no explicit enablement). NOTE(review): the early-return and the
 * cp.le assignment lines are missing from this dump.
 */
1587 static void hci_set_le_support(struct hci_request *req)
1589 struct hci_dev *hdev = req->hdev;
1590 struct hci_cp_write_le_host_supported cp;
1592 /* LE-only devices do not support explicit enablement */
1593 if (!lmp_bredr_capable(hdev))
1596 memset(&cp, 0, sizeof(cp));
1598 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1600 cp.simul = lmp_le_br_capable(hdev);
/* Only send the command if the desired state differs from the current one */
1603 if (cp.le != lmp_host_le_capable(hdev))
1604 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and queue HCI event-mask page 2: enables CSB master/slave events
 * and (if ping-capable) the Authenticated Payload Timeout Expired event.
 * NOTE(review): the events[] bit set under lmp_ping_capable() is missing
 * from this dump.
 */
1608 static void hci_set_event_mask_page_2(struct hci_request *req)
1610 struct hci_dev *hdev = req->hdev;
1611 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1613 /* If Connectionless Slave Broadcast master role is supported
1614 * enable all necessary events for it.
1616 if (lmp_csb_master_capable(hdev)) {
1617 events[1] |= 0x40; /* Triggered Clock Capture */
1618 events[1] |= 0x80; /* Synchronization Train Complete */
1619 events[2] |= 0x10; /* Slave Page Response Timeout */
1620 events[2] |= 0x20; /* CSB Channel Map Change */
1623 /* If Connectionless Slave Broadcast slave role is supported
1624 * enable all necessary events for it.
1626 if (lmp_csb_slave_capable(hdev)) {
1627 events[2] |= 0x01; /* Synchronization Train Received */
1628 events[2] |= 0x02; /* CSB Receive */
1629 events[2] |= 0x04; /* CSB Timeout */
1630 events[2] |= 0x08; /* Truncated Page Complete */
1633 /* Enable Authenticated Payload Timeout Expired event if supported */
1634 if (lmp_ping_capable(hdev))
1637 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Stage-3 controller init: conditional Delete Stored Link Key, default link
 * policy, LE host support, and reads of extended feature pages >= 2.
 * NOTE(review): loop variable declaration for 'p' and parts of the LE branch
 * are missing from this dump.
 */
1640 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1642 struct hci_dev *hdev = req->hdev;
1645 /* Some Broadcom based Bluetooth controllers do not support the
1646 * Delete Stored Link Key command. They are clearly indicating its
1647 * absence in the bit mask of supported commands.
1649 * Check the supported commands and only if the the command is marked
1650 * as supported send it. If not supported assume that the controller
1651 * does not have actual support for stored link keys which makes this
1652 * command redundant anyway.
1654 * Some controllers indicate that they support handling deleting
1655 * stored link keys, but they don't. The quirk lets a driver
1656 * just disable this command.
/* commands[6] bit 0x80 == Delete Stored Link Key supported */
1658 if (hdev->commands[6] & 0x80 &&
1659 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1660 struct hci_cp_delete_stored_link_key cp;
1662 bacpy(&cp.bdaddr, BDADDR_ANY);
1663 cp.delete_all = 0x01;
1664 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* commands[5] bit 0x10 == Write Default Link Policy Settings supported */
1668 if (hdev->commands[5] & 0x10)
1669 hci_setup_link_policy(req);
1671 if (lmp_le_capable(hdev))
1672 hci_set_le_support(req);
1674 /* Read features beyond page 1 if available */
1675 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1676 struct hci_cp_read_local_ext_features cp;
1679 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Stage-4 controller init: event-mask page 2, synchronization-train
 * parameters, and Secure Connections support. NOTE(review): the local
 * 'support' variable declaration is missing from this dump.
 */
1684 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1686 struct hci_dev *hdev = req->hdev;
1688 /* Set event mask page 2 if the HCI command for it is supported */
1689 if (hdev->commands[22] & 0x04)
1690 hci_set_event_mask_page_2(req);
1692 /* Check for Synchronization Train support */
1693 if (lmp_sync_train_capable(hdev))
1694 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1696 /* Enable Secure Connections if supported and configured */
1697 if ((lmp_sc_capable(hdev) ||
1698 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
1699 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1701 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1702 sizeof(support), &support);
/* Run the four synchronous init request stages against the controller and,
 * during first-time setup only, populate the per-device debugfs tree.
 * Returns 0 on success or the first failing __hci_req_sync() error.
 * NOTE(review): error-check lines (if (err < 0) return err;) between the
 * stages and several fops arguments are missing from this dump; code lines
 * are preserved byte-for-byte.
 */
1706 static int __hci_init(struct hci_dev *hdev)
1710 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1714 /* The Device Under Test (DUT) mode is special and available for
1715 * all controller types. So just create it early on.
1717 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1718 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1722 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1723 * BR/EDR/LE type controllers. AMP controllers only need the
/* AMP controllers stop after stage 1 — stages 2-4 are BR/EDR/LE only */
1726 if (hdev->dev_type != HCI_BREDR)
1729 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1733 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1737 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1741 /* Only create debugfs entries during the initial setup
1742 * phase and not every time the controller gets powered on.
1744 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1747 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1749 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1750 &hdev->manufacturer);
1751 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1752 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1753 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1755 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
/* BR/EDR-only debugfs entries */
1757 if (lmp_bredr_capable(hdev)) {
1758 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1759 hdev, &inquiry_cache_fops);
1760 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1761 hdev, &link_keys_fops);
1762 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1763 hdev, &dev_class_fops);
1764 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1765 hdev, &voice_setting_fops);
/* SSP-only debugfs entries */
1768 if (lmp_ssp_capable(hdev)) {
1769 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1770 hdev, &auto_accept_delay_fops);
1771 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1772 hdev, &ssp_debug_mode_fops);
1773 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1774 hdev, &force_sc_support_fops);
1775 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1776 hdev, &sc_only_mode_fops);
/* Sniff-mode tunables */
1779 if (lmp_sniff_capable(hdev)) {
1780 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1781 hdev, &idle_timeout_fops);
1782 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1783 hdev, &sniff_min_interval_fops);
1784 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1785 hdev, &sniff_max_interval_fops);
/* LE-only debugfs entries */
1788 if (lmp_le_capable(hdev)) {
1789 debugfs_create_file("identity", 0400, hdev->debugfs,
1790 hdev, &identity_fops);
1791 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1792 hdev, &rpa_timeout_fops);
1793 debugfs_create_file("random_address", 0444, hdev->debugfs,
1794 hdev, &random_address_fops);
1795 debugfs_create_file("static_address", 0444, hdev->debugfs,
1796 hdev, &static_address_fops);
1798 /* For controllers with a public address, provide a debug
1799 * option to force the usage of the configured static
1800 * address. By default the public address is used.
1802 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1803 debugfs_create_file("force_static_address", 0644,
1804 hdev->debugfs, hdev,
1805 &force_static_address_fops);
1807 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1808 &hdev->le_white_list_size);
1809 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1811 debugfs_create_file("identity_resolving_keys", 0400,
1812 hdev->debugfs, hdev,
1813 &identity_resolving_keys_fops);
1814 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1815 hdev, &long_term_keys_fops);
1816 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1817 hdev, &conn_min_interval_fops);
1818 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1819 hdev, &conn_max_interval_fops);
1820 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1821 hdev, &adv_channel_map_fops);
1822 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1823 &lowpan_debugfs_fops);
1824 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1825 &le_auto_conn_fops);
1826 debugfs_create_u16("discov_interleaved_timeout", 0644,
1828 &hdev->discov_interleaved_timeout);
/* Request callback: queue Write Scan Enable with the scan mode packed in
 * 'opt'. NOTE(review): the local '__u8 scan = opt;' line is missing from
 * this dump.
 */
1834 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1838 BT_DBG("%s %x", req->hdev->name, scan);
1840 /* Inquiry and Page scans */
1841 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request callback: queue Write Authentication Enable with the value packed
 * in 'opt'. NOTE(review): the local '__u8 auth = opt;' line is missing from
 * this dump.
 */
1844 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1848 BT_DBG("%s %x", req->hdev->name, auth);
1850 /* Authentication */
1851 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request callback: queue Write Encryption Mode with the value packed in
 * 'opt'. NOTE(review): the local '__u8 encrypt = opt;' line is missing from
 * this dump.
 */
1854 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1858 BT_DBG("%s %x", req->hdev->name, encrypt);
1861 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request callback: queue Write Default Link Policy with the 16-bit policy
 * packed in 'opt' (converted to little-endian for the wire).
 */
1864 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1866 __le16 policy = cpu_to_le16(opt);
1868 BT_DBG("%s %x", req->hdev->name, policy);
1870 /* Default link policy */
1871 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
/* Get HCI device by index.
 * Device is held on return. */
/* Walks hci_dev_list under the read lock and takes a reference on the
 * matching device via hci_dev_hold(). NOTE(review): the negative-index
 * early return and final 'return hdev;' are missing from this dump.
 */
1876 struct hci_dev *hci_dev_get(int index)
1878 struct hci_dev *hdev = NULL, *d;
1880 BT_DBG("%d", index);
1885 read_lock(&hci_dev_list_lock);
1886 list_for_each_entry(d, &hci_dev_list, list) {
1887 if (d->id == index) {
1888 hdev = hci_dev_hold(d);
1892 read_unlock(&hci_dev_list_lock);
/* ---- Inquiry support ---- */
/* True while a discovery procedure is in the FINDING or RESOLVING state.
 * NOTE(review): the 'return true;' / 'default: return false;' arms are
 * missing from this dump.
 */
1898 bool hci_discovery_active(struct hci_dev *hdev)
1900 struct discovery_state *discov = &hdev->discovery;
1902 switch (discov->state) {
1903 case DISCOVERY_FINDING:
1904 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine, emitting mgmt "discovering"
 * events on the STOPPED and FINDING transitions and kicking background
 * scanning when discovery stops. No-op if the state is unchanged.
 * NOTE(review): 'break' statements between the switch arms are missing
 * from this dump.
 */
1912 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1914 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1916 if (hdev->discovery.state == state)
1920 case DISCOVERY_STOPPED:
1921 hci_update_background_scan(hdev);
/* Only report "stopped" if we actually got past STARTING */
1923 if (hdev->discovery.state != DISCOVERY_STARTING)
1924 mgmt_discovering(hdev, 0);
1926 case DISCOVERY_STARTING:
1928 case DISCOVERY_FINDING:
1929 mgmt_discovering(hdev, 1);
1931 case DISCOVERY_RESOLVING:
1933 case DISCOVERY_STOPPING:
1937 hdev->discovery.state = state;
/* Free every entry in the inquiry cache and reset the unknown/resolve
 * sub-lists. NOTE(review): the per-entry list_del/kfree lines inside the
 * loop are missing from this dump.
 */
1940 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1942 struct discovery_state *cache = &hdev->discovery;
1943 struct inquiry_entry *p, *n;
1945 list_for_each_entry_safe(p, n, &cache->all, all) {
1950 INIT_LIST_HEAD(&cache->unknown);
1951 INIT_LIST_HEAD(&cache->resolve);
/* Find an inquiry-cache entry by Bluetooth address on the 'all' list.
 * NOTE(review): the 'return e;' / trailing 'return NULL;' lines are
 * missing from this dump.
 */
1954 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1957 struct discovery_state *cache = &hdev->discovery;
1958 struct inquiry_entry *e;
1960 BT_DBG("cache %p, %pMR", cache, bdaddr);
1962 list_for_each_entry(e, &cache->all, all) {
1963 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry whose remote name is still unknown, searching the
 * 'unknown' sub-list by address. NOTE(review): return lines are missing
 * from this dump.
 */
1970 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1973 struct discovery_state *cache = &hdev->discovery;
1974 struct inquiry_entry *e;
1976 BT_DBG("cache %p, %pMR", cache, bdaddr);
1978 list_for_each_entry(e, &cache->unknown, list) {
1979 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry on the 'resolve' sub-list: with BDADDR_ANY, match by
 * name_state; otherwise match by address. NOTE(review): return lines are
 * missing from this dump.
 */
1986 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1990 struct discovery_state *cache = &hdev->discovery;
1991 struct inquiry_entry *e;
1993 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1995 list_for_each_entry(e, &cache->resolve, list) {
1996 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1998 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert 'ie' into the resolve list so that entries are ordered by
 * descending |RSSI| (strongest first), skipping NAME_PENDING entries.
 * NOTE(review): the 'pos = &p->list; break;' style body inside the loop is
 * missing from this dump.
 */
2005 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2006 struct inquiry_entry *ie)
2008 struct discovery_state *cache = &hdev->discovery;
2009 struct list_head *pos = &cache->resolve;
2010 struct inquiry_entry *p;
2012 list_del(&ie->list);
2014 list_for_each_entry(p, &cache->resolve, list) {
2015 if (p->name_state != NAME_PENDING &&
2016 abs(p->data.rssi) >= abs(ie->data.rssi))
2021 list_add(&ie->list, pos);
/* Insert or refresh an inquiry-cache entry from fresh inquiry data.
 * Updates *ssp from the reported SSP mode, keeps the resolve list ordered
 * when an existing entry's RSSI changes, and allocates a new entry when the
 * address is not yet cached. Return value presumably indicates whether a
 * name request is needed — TODO confirm; the return statements are missing
 * from this dump.
 */
2024 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2025 bool name_known, bool *ssp)
2027 struct discovery_state *cache = &hdev->discovery;
2028 struct inquiry_entry *ie;
2030 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* Fresh inquiry data invalidates any stored OOB data for this address */
2032 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2034 *ssp = data->ssp_mode;
2036 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2038 if (ie->data.ssp_mode)
/* Existing entry whose name is still needed: refresh RSSI and re-sort */
2041 if (ie->name_state == NAME_NEEDED &&
2042 data->rssi != ie->data.rssi) {
2043 ie->data.rssi = data->rssi;
2044 hci_inquiry_cache_update_resolve(hdev, ie);
2050 /* Entry not in the cache. Add new one. */
2051 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2055 list_add(&ie->all, &cache->all);
2058 ie->name_state = NAME_KNOWN;
2060 ie->name_state = NAME_NOT_KNOWN;
2061 list_add(&ie->list, &cache->unknown);
/* A previously unknown name just became known: drop from unknown list */
2065 if (name_known && ie->name_state != NAME_KNOWN &&
2066 ie->name_state != NAME_PENDING) {
2067 ie->name_state = NAME_KNOWN;
2068 list_del(&ie->list);
2071 memcpy(&ie->data, data, sizeof(*data));
2072 ie->timestamp = jiffies;
2073 cache->timestamp = jiffies;
2075 if (ie->name_state == NAME_NOT_KNOWN)
/* Copy up to 'num' cached inquiry results into 'buf' as an array of
 * struct inquiry_info for the HCIINQUIRY ioctl. Returns the number copied.
 * NOTE(review): the 'copied' counter declaration/limit check and the final
 * return are missing from this dump.
 */
2081 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2083 struct discovery_state *cache = &hdev->discovery;
2084 struct inquiry_info *info = (struct inquiry_info *) buf;
2085 struct inquiry_entry *e;
2088 list_for_each_entry(e, &cache->all, all) {
2089 struct inquiry_data *data = &e->data;
2094 bacpy(&info->bdaddr, &data->bdaddr);
2095 info->pscan_rep_mode = data->pscan_rep_mode;
2096 info->pscan_period_mode = data->pscan_period_mode;
2097 info->pscan_mode = data->pscan_mode;
2098 memcpy(info->dev_class, data->dev_class, 3);
2099 info->clock_offset = data->clock_offset;
2105 BT_DBG("cache %p, copied %d", cache, copied);
/* Request callback: queue an HCI Inquiry command built from the
 * hci_inquiry_req passed via 'opt'. Skipped if an inquiry is already in
 * progress (HCI_INQUIRY flag set).
 */
2109 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2111 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2112 struct hci_dev *hdev = req->hdev;
2113 struct hci_cp_inquiry cp;
2115 BT_DBG("%s", hdev->name);
2117 if (test_bit(HCI_INQUIRY, &hdev->flags))
/* LAP is a 3-byte field per the HCI spec */
2121 memcpy(&cp.lap, &ir->lap, 3);
2122 cp.length = ir->length;
2123 cp.num_rsp = ir->num_rsp;
2124 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit() action: sleep until the inquiry bit clears, waking early
 * if a signal is pending.
 */
2127 static int wait_inquiry(void *word)
2130 return signal_pending(current);
/* HCIINQUIRY ioctl: validate the device, optionally flush a stale cache,
 * run a synchronous inquiry, wait for completion, then copy the cached
 * results back to user space. NOTE(review): error paths, lock calls and
 * several declarations (timeo, buf) are missing from this dump.
 */
2133 int hci_inquiry(void __user *arg)
2135 __u8 __user *ptr = arg;
2136 struct hci_inquiry_req ir;
2137 struct hci_dev *hdev;
2138 int err = 0, do_inquiry = 0, max_rsp;
2142 if (copy_from_user(&ir, ptr, sizeof(ir)))
2145 hdev = hci_dev_get(ir.dev_id);
/* Raw user-channel devices are owned by a user-space stack — reject */
2149 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2154 if (hdev->dev_type != HCI_BREDR) {
2159 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Flush the cache if it is stale, empty, or the caller asked for it */
2165 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2166 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2167 hci_inquiry_cache_flush(hdev);
2170 hci_dev_unlock(hdev);
2172 timeo = ir.length * msecs_to_jiffies(2000);
2175 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2180 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2181 * cleared). If it is interrupted by a signal, return -EINTR.
2183 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2184 TASK_INTERRUPTIBLE))
2188 /* for unlimited number of responses we will use buffer with
2191 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2193 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2194 * copy it to the user space.
2196 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2203 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2204 hci_dev_unlock(hdev);
2206 BT_DBG("num_rsp %d", ir.num_rsp);
2208 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2210 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Power on / open the controller: validate preconditions (not unregistered,
 * not rfkilled outside setup, usable address for BR/EDR), call the driver
 * open(), optionally the setup() hook, run __hci_init(), and on success set
 * HCI_UP and notify; on init failure tear everything back down.
 * NOTE(review): lock/unlock calls, several 'goto done' error paths and the
 * driver close() on the failure path are missing from this dump.
 */
2223 static int hci_dev_do_open(struct hci_dev *hdev)
2227 BT_DBG("%s %p", hdev->name, hdev);
2231 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2236 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2237 /* Check for rfkill but allow the HCI setup stage to
2238 * proceed (which in itself doesn't cause any RF activity).
2240 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2245 /* Check for valid public address or a configured static
2246 * random adddress, but let the HCI setup proceed to
2247 * be able to determine if there is a public address
2250 * In case of user channel usage, it is not important
2251 * if a public address or static random address is
2254 * This check is only valid for BR/EDR controllers
2255 * since AMP controllers do not have an address.
2257 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2258 hdev->dev_type == HCI_BREDR &&
2259 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2260 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2261 ret = -EADDRNOTAVAIL;
/* Already up: nothing to do (presumably returns -EALREADY — TODO confirm) */
2266 if (test_bit(HCI_UP, &hdev->flags)) {
2271 if (hdev->open(hdev)) {
2276 atomic_set(&hdev->cmd_cnt, 1);
2277 set_bit(HCI_INIT, &hdev->flags);
2279 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2280 ret = hdev->setup(hdev);
2283 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2284 set_bit(HCI_RAW, &hdev->flags);
2286 if (!test_bit(HCI_RAW, &hdev->flags) &&
2287 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2288 ret = __hci_init(hdev);
2291 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark up, notify watchers, report power to mgmt */
2295 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2296 set_bit(HCI_UP, &hdev->flags);
2297 hci_notify(hdev, HCI_DEV_UP);
2298 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2299 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2300 hdev->dev_type == HCI_BREDR) {
2302 mgmt_powered(hdev, 1);
2303 hci_dev_unlock(hdev);
2306 /* Init failed, cleanup */
2307 flush_work(&hdev->tx_work);
2308 flush_work(&hdev->cmd_work);
2309 flush_work(&hdev->rx_work);
2311 skb_queue_purge(&hdev->cmd_q);
2312 skb_queue_purge(&hdev->rx_q);
2317 if (hdev->sent_cmd) {
2318 kfree_skb(hdev->sent_cmd);
2319 hdev->sent_cmd = NULL;
2327 hci_req_unlock(hdev);
/* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl: cancel a pending auto power-off, wait for the setup
 * procedure to finish, then perform the actual open. NOTE(review): the
 * -ENODEV check after hci_dev_get() and the final hci_dev_put()/return are
 * missing from this dump.
 */
2333 int hci_dev_open(__u16 dev)
2335 struct hci_dev *hdev;
2338 hdev = hci_dev_get(dev);
2342 /* We need to ensure that no other power on/off work is pending
2343 * before proceeding to call hci_dev_do_open. This is
2344 * particularly important if the setup procedure has not yet
2347 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2348 cancel_delayed_work(&hdev->power_off);
2350 /* After this call it is guaranteed that the setup procedure
2351 * has finished. This means that error conditions like RFKILL
2352 * or no valid public or static random address apply.
2354 flush_workqueue(hdev->req_workqueue);
2356 err = hci_dev_do_open(hdev);
2363 static int hci_dev_do_close(struct hci_dev *hdev)
2365 BT_DBG("%s %p", hdev->name, hdev);
2367 cancel_delayed_work(&hdev->power_off);
2369 hci_req_cancel(hdev, ENODEV);
2372 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2373 del_timer_sync(&hdev->cmd_timer);
2374 hci_req_unlock(hdev);
2378 /* Flush RX and TX works */
2379 flush_work(&hdev->tx_work);
2380 flush_work(&hdev->rx_work);
2382 if (hdev->discov_timeout > 0) {
2383 cancel_delayed_work(&hdev->discov_off);
2384 hdev->discov_timeout = 0;
2385 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2386 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2389 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2390 cancel_delayed_work(&hdev->service_cache);
2392 cancel_delayed_work_sync(&hdev->le_scan_disable);
2394 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2395 cancel_delayed_work_sync(&hdev->rpa_expired);
2398 hci_inquiry_cache_flush(hdev);
2399 hci_conn_hash_flush(hdev);
2400 hci_pend_le_conns_clear(hdev);
2401 hci_dev_unlock(hdev);
2403 hci_notify(hdev, HCI_DEV_DOWN);
2409 skb_queue_purge(&hdev->cmd_q);
2410 atomic_set(&hdev->cmd_cnt, 1);
2411 if (!test_bit(HCI_RAW, &hdev->flags) &&
2412 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2413 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2414 set_bit(HCI_INIT, &hdev->flags);
2415 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2416 clear_bit(HCI_INIT, &hdev->flags);
2419 /* flush cmd work */
2420 flush_work(&hdev->cmd_work);
2423 skb_queue_purge(&hdev->rx_q);
2424 skb_queue_purge(&hdev->cmd_q);
2425 skb_queue_purge(&hdev->raw_q);
2427 /* Drop last sent command */
2428 if (hdev->sent_cmd) {
2429 del_timer_sync(&hdev->cmd_timer);
2430 kfree_skb(hdev->sent_cmd);
2431 hdev->sent_cmd = NULL;
2434 kfree_skb(hdev->recv_evt);
2435 hdev->recv_evt = NULL;
2437 /* After this point our queues are empty
2438 * and no tasks are scheduled. */
2443 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2445 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2446 if (hdev->dev_type == HCI_BREDR) {
2448 mgmt_powered(hdev, 0);
2449 hci_dev_unlock(hdev);
2453 /* Controller radio is available but is currently powered down */
2454 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2456 memset(hdev->eir, 0, sizeof(hdev->eir));
2457 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2458 bacpy(&hdev->random_addr, BDADDR_ANY);
2460 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl: reject user-channel devices, cancel pending auto
 * power-off, then close the controller. NOTE(review): the -ENODEV check,
 * error 'goto done' and final hci_dev_put()/return are missing from this
 * dump.
 */
2466 int hci_dev_close(__u16 dev)
2468 struct hci_dev *hdev;
2471 hdev = hci_dev_get(dev);
2475 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2480 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2481 cancel_delayed_work(&hdev->power_off);
2483 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: purge queues, flush caches and connections, reset
 * packet counters, and (unless the device is raw) send a synchronous HCI
 * Reset. NOTE(review): driver flush() call, lock calls and error paths are
 * missing from this dump.
 */
2490 int hci_dev_reset(__u16 dev)
2492 struct hci_dev *hdev;
2495 hdev = hci_dev_get(dev);
2501 if (!test_bit(HCI_UP, &hdev->flags)) {
2506 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
/* Drop queues */
2512 skb_queue_purge(&hdev->rx_q);
2513 skb_queue_purge(&hdev->cmd_q);
2516 hci_inquiry_cache_flush(hdev);
2517 hci_conn_hash_flush(hdev);
2518 hci_dev_unlock(hdev);
2523 atomic_set(&hdev->cmd_cnt, 1);
2524 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2526 if (!test_bit(HCI_RAW, &hdev->flags))
2527 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2530 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device statistics counters. Rejected for
 * user-channel devices. NOTE(review): the -ENODEV check and the final
 * hci_dev_put()/return are missing from this dump.
 */
2535 int hci_dev_reset_stat(__u16 dev)
2537 struct hci_dev *hdev;
2540 hdev = hci_dev_get(dev);
2544 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2549 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatch the legacy HCISET* ioctls (auth, encrypt, scan, link policy,
 * link mode, packet type, ACL/SCO MTU) for a given device. NOTE(review):
 * the switch statement itself, case labels for several commands, 'break's
 * and error paths are missing from this dump.
 */
2556 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2558 struct hci_dev *hdev;
2559 struct hci_dev_req dr;
2562 if (copy_from_user(&dr, arg, sizeof(dr)))
2565 hdev = hci_dev_get(dr.dev_id);
2569 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2574 if (hdev->dev_type != HCI_BREDR) {
2579 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* HCISETAUTH */
2586 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
/* HCISETENCRYPT: encryption requires authentication first */
2591 if (!lmp_encrypt_capable(hdev)) {
2596 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2597 /* Auth must be enabled first */
2598 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2604 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
/* HCISETSCAN */
2609 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
/* HCISETLINKPOL */
2614 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2618 case HCISETLINKMODE:
2619 hdev->link_mode = ((__u16) dr.dev_opt) &
2620 (HCI_LM_MASTER | HCI_LM_ACCEPT);
/* HCISETPTYPE */
2624 hdev->pkt_type = (__u16) dr.dev_opt;
/* HCISETACLMTU: dev_opt packs MTU in the high half, pkt count in the low */
2628 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2629 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCISETSCOMTU: same packing as ACL MTU */
2633 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2634 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: copy the id/flags of up to dev_num registered
 * devices to user space. Also cancels pending auto power-off per device
 * and marks non-mgmt devices pairable. NOTE(review): dev_num declaration,
 * allocation-failure check, n++ and kfree(dl) lines are missing from this
 * dump.
 */
2647 int hci_get_dev_list(void __user *arg)
2649 struct hci_dev *hdev;
2650 struct hci_dev_list_req *dl;
2651 struct hci_dev_req *dr;
2652 int n = 0, size, err;
2655 if (get_user(dev_num, (__u16 __user *) arg))
/* Bound the request so the kmalloc below stays within two pages */
2658 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2661 size = sizeof(*dl) + dev_num * sizeof(*dr);
2663 dl = kzalloc(size, GFP_KERNEL);
2669 read_lock(&hci_dev_list_lock);
2670 list_for_each_entry(hdev, &hci_dev_list, list) {
2671 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2672 cancel_delayed_work(&hdev->power_off);
2674 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2675 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2677 (dr + n)->dev_id = hdev->id;
2678 (dr + n)->dev_opt = hdev->flags;
2683 read_unlock(&hci_dev_list_lock);
/* Shrink the copy size to the number of devices actually found */
2686 size = sizeof(*dl) + n * sizeof(*dr);
2688 err = copy_to_user(arg, dl, size);
2691 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot (address, type,
 * flags, MTUs, link policy/mode, stats, features) and copy it to user
 * space. For LE-only devices the ACL fields carry the LE buffer info.
 * NOTE(review): -ENODEV check, error return and hci_dev_put() are missing
 * from this dump.
 */
2694 int hci_get_dev_info(void __user *arg)
2696 struct hci_dev *hdev;
2697 struct hci_dev_info di;
2700 if (copy_from_user(&di, arg, sizeof(di)))
2703 hdev = hci_dev_get(di.dev_id);
2707 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2708 cancel_delayed_work_sync(&hdev->power_off);
2710 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2711 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2713 strcpy(di.name, hdev->name);
2714 di.bdaddr = hdev->bdaddr;
/* Low nibble: bus type; bits 4-5: device type */
2715 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2716 di.flags = hdev->flags;
2717 di.pkt_type = hdev->pkt_type;
2718 if (lmp_bredr_capable(hdev)) {
2719 di.acl_mtu = hdev->acl_mtu;
2720 di.acl_pkts = hdev->acl_pkts;
2721 di.sco_mtu = hdev->sco_mtu;
2722 di.sco_pkts = hdev->sco_pkts;
/* else-branch: LE-only — report LE buffers via the ACL fields */
2724 di.acl_mtu = hdev->le_mtu;
2725 di.acl_pkts = hdev->le_pkts;
2729 di.link_policy = hdev->link_policy;
2730 di.link_mode = hdev->link_mode;
2732 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2733 memcpy(&di.features, &hdev->features, sizeof(di.features));
2735 if (copy_to_user(arg, &di, sizeof(di)))
/* ---- Interface to HCI drivers ---- */
/* rfkill callback: track the RFKILLED flag and close the device when it is
 * blocked outside the setup phase. User-channel devices are left alone
 * (presumably returns -EBUSY — TODO confirm; return lines are missing
 * from this dump).
 */
2745 static int hci_rfkill_set_block(void *data, bool blocked)
2747 struct hci_dev *hdev = data;
2749 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2751 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2755 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2756 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2757 hci_dev_do_close(hdev);
/* else-branch: unblocked — just clear the flag */
2759 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations table registered for each controller */
2765 static const struct rfkill_ops hci_rfkill_ops = {
2766 .set_block = hci_rfkill_set_block,
/* Deferred power-on work: open the device, re-check conditions that were
 * deliberately ignored during setup (rfkill, missing address) and power
 * back off if they still hold; otherwise arm the auto-off timer. Finally
 * reports the new index to mgmt when leaving the SETUP phase.
 */
2769 static void hci_power_on(struct work_struct *work)
2771 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2774 BT_DBG("%s", hdev->name);
2776 err = hci_dev_do_open(hdev);
2778 mgmt_set_powered_failed(hdev, err);
2782 /* During the HCI setup phase, a few error conditions are
2783 * ignored and they need to be checked now. If they are still
2784 * valid, it is important to turn the device back off.
2786 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2787 (hdev->dev_type == HCI_BREDR &&
2788 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2789 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2790 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2791 hci_dev_do_close(hdev);
2792 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2793 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2794 HCI_AUTO_OFF_TIMEOUT);
2797 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2798 mgmt_index_added(hdev);
/* Deferred power-off work: simply close the device. */
2801 static void hci_power_off(struct work_struct *work)
2803 struct hci_dev *hdev = container_of(work, struct hci_dev,
2806 BT_DBG("%s", hdev->name);
2808 hci_dev_do_close(hdev);
/* Delayed work: the discoverable timeout elapsed — let mgmt turn
 * discoverability off.
 */
2811 static void hci_discov_off(struct work_struct *work)
2813 struct hci_dev *hdev;
2815 hdev = container_of(work, struct hci_dev, discov_off.work);
2817 BT_DBG("%s", hdev->name);
2819 mgmt_discoverable_timeout(hdev);
/* Remove and free every registered UUID. NOTE(review): the kfree(uuid)
 * line inside the loop is missing from this dump.
 */
2822 void hci_uuids_clear(struct hci_dev *hdev)
2824 struct bt_uuid *uuid, *tmp;
2826 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2827 list_del(&uuid->list);
/* Remove and free every stored BR/EDR link key. NOTE(review): the
 * list_del/kfree lines inside the loop are missing from this dump.
 */
2832 void hci_link_keys_clear(struct hci_dev *hdev)
2834 struct list_head *p, *n;
2836 list_for_each_safe(p, n, &hdev->link_keys) {
2837 struct link_key *key;
2839 key = list_entry(p, struct link_key, list);
/* Remove and free every stored SMP long-term key. NOTE(review): the
 * list_del/kfree lines inside the loop are missing from this dump.
 */
2846 void hci_smp_ltks_clear(struct hci_dev *hdev)
2848 struct smp_ltk *k, *tmp;
2850 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Remove and free every stored identity resolving key. NOTE(review): the
 * list_del/kfree lines inside the loop are missing from this dump.
 */
2856 void hci_smp_irks_clear(struct hci_dev *hdev)
2858 struct smp_irk *k, *tmp;
2860 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
/* Look up a stored BR/EDR link key by remote address. NOTE(review): the
 * local 'k' declaration and the return lines are missing from this dump.
 */
2866 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2870 list_for_each_entry(k, &hdev->link_keys, list)
2871 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored persistently,
 * based on key type and the local/remote authentication requirements.
 * NOTE(review): the individual 'return true/false' lines for each rule are
 * missing from this dump; only the conditions remain.
 */
2877 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2878 u8 key_type, u8 old_key_type)
/* Legacy key types below 0x03 (combination etc.) */
2881 if (key_type < 0x03)
2884 /* Debug keys are insecure so don't store them persistently */
2885 if (key_type == HCI_LK_DEBUG_COMBINATION)
2888 /* Changed combination key and there's no previous one */
2889 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2892 /* Security mode 3 case */
2896 /* Neither local nor remote side had no-bonding as requirement */
2897 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2900 /* Local side had dedicated bonding as requirement */
2901 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2904 /* Remote side had dedicated bonding as requirement */
2905 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2908 /* If none of the above criteria match, then don't store the key
/* True for LTK types used in the master role (STK or LTK).
 * NOTE(review): the return lines are missing from this dump.
 */
2913 static bool ltk_type_master(u8 type)
2915 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
/* Look up a long-term key by EDiv/Rand and master/slave role.
 * NOTE(review): the local 'k' declaration and return lines are missing
 * from this dump.
 */
2921 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2926 list_for_each_entry(k, &hdev->long_term_keys, list) {
2927 if (k->ediv != ediv || k->rand != rand)
2930 if (ltk_type_master(k->type) != master)
/* Look up a long-term key by remote address, address type and role.
 * NOTE(review): the local 'k' declaration and return lines are missing
 * from this dump.
 */
2939 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2940 u8 addr_type, bool master)
2944 list_for_each_entry(k, &hdev->long_term_keys, list)
2945 if (addr_type == k->bdaddr_type &&
2946 bacmp(bdaddr, &k->bdaddr) == 0 &&
2947 ltk_type_master(k->type) == master)
/* Resolve a Resolvable Private Address to its IRK: first check the cached
 * rpa field, then try each stored IRK cryptographically via
 * smp_irk_matches(), caching the RPA on a hit. NOTE(review): return lines
 * are missing from this dump.
 */
2953 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2955 struct smp_irk *irk;
2957 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2958 if (!bacmp(&irk->rpa, rpa))
2962 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2963 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2964 bacpy(&irk->rpa, rpa);
/* Look up an IRK by identity address and type. Random identity addresses
 * must have the static-random top bits (0b11) set. NOTE(review): return
 * lines are missing from this dump.
 */
2972 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2975 struct smp_irk *irk;
2977 /* Identity Address must be public or static random */
2978 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2981 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2982 if (addr_type == irk->addr_type &&
2983 bacmp(bdaddr, &irk->bdaddr) == 0)
/* Store (or refresh) a BR/EDR link key for 'bdaddr'. Reuses the existing
 * entry if one is present, otherwise allocates and links a new one.
 * Works around buggy controllers that report a changed-combination key
 * with no prior key. When 'new_key' is set, notifies mgmt and records on
 * the connection whether the key should be flushed (non-persistent).
 * NOTE(review): allocation-failure handling, the old_key reuse branch and
 * the return are missing from this dump.
 */
2990 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2991 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2993 struct link_key *key, *old_key;
2997 old_key = hci_find_link_key(hdev, bdaddr);
2999 old_key_type = old_key->type;
/* No previous key: 0xff marks "no old key" for the heuristics below */
3002 old_key_type = conn ? conn->key_type : 0xff;
3003 key = kzalloc(sizeof(*key), GFP_KERNEL);
3006 list_add(&key->list, &hdev->link_keys);
3009 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3011 /* Some buggy controller combinations generate a changed
3012 * combination key for legacy pairing even when there's no
3014 if (type == HCI_LK_CHANGED_COMBINATION &&
3015 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3016 type = HCI_LK_COMBINATION;
3018 conn->key_type = type;
3021 bacpy(&key->bdaddr, bdaddr);
3022 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3023 key->pin_len = pin_len;
/* A changed-combination key keeps the original key's type */
3025 if (type == HCI_LK_CHANGED_COMBINATION)
3026 key->type = old_key_type;
3033 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
3035 mgmt_new_link_key(hdev, key, persistent);
3038 conn->flush_key = !persistent;
3043 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3044 u8 addr_type, u8 type, u8 authenticated,
3045 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3047 struct smp_ltk *key, *old_key;
3048 bool master = ltk_type_master(type);
3050 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3054 key = kzalloc(sizeof(*key), GFP_KERNEL);
3057 list_add(&key->list, &hdev->long_term_keys);
3060 bacpy(&key->bdaddr, bdaddr);
3061 key->bdaddr_type = addr_type;
3062 memcpy(key->val, tk, sizeof(key->val));
3063 key->authenticated = authenticated;
3066 key->enc_size = enc_size;
3072 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3073 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3075 struct smp_irk *irk;
3077 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3079 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3083 bacpy(&irk->bdaddr, bdaddr);
3084 irk->addr_type = addr_type;
3086 list_add(&irk->list, &hdev->identity_resolving_keys);
3089 memcpy(irk->val, val, 16);
3090 bacpy(&irk->rpa, rpa);
/* Remove the stored BR/EDR link key for bdaddr, if any.
 * NOTE(review): the not-found return (presumably -ENOENT) and the kfree
 * of the removed entry are missing from this extraction.
 */
3095 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3097 struct link_key *key;
3099 key = hci_find_link_key(hdev, bdaddr);
3103 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3105 list_del(&key->list);
/* Remove every LTK matching bdaddr/bdaddr_type; safe iteration because
 * entries are deleted while walking the list.  Returns 0 if at least one
 * entry was removed, -ENOENT otherwise.
 */
3111 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3113 struct smp_ltk *k, *tmp;
3116 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3117 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3120 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3127 return removed ? 0 : -ENOENT;
/* Remove every IRK matching bdaddr/addr_type. */
3130 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3132 struct smp_irk *k, *tmp;
3134 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3135 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3138 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3145 /* HCI command timer function */
/* Fires when the controller fails to answer an HCI command in time.
 * Logs the stalled opcode (when the sent command is still around),
 * resets the command credit to 1 and kicks the command work so the
 * queue can make progress again.
 */
3146 static void hci_cmd_timeout(unsigned long arg)
3148 struct hci_dev *hdev = (void *) arg;
3150 if (hdev->sent_cmd) {
3151 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3152 u16 opcode = __le16_to_cpu(sent->opcode);
3154 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3156 BT_ERR("%s command tx timeout", hdev->name);
3159 atomic_set(&hdev->cmd_cnt, 1);
3160 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote Out-Of-Band pairing data for bdaddr. */
3163 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3166 struct oob_data *data;
3168 list_for_each_entry(data, &hdev->remote_oob_data, list)
3169 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Remove stored OOB data for bdaddr, if present. */
3175 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3177 struct oob_data *data;
3179 data = hci_find_remote_oob_data(hdev, bdaddr);
3183 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3185 list_del(&data->list);
/* Drop all stored remote OOB data entries. */
3191 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3193 struct oob_data *data, *n;
3195 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3196 list_del(&data->list);
/* Store legacy (P-192 only) OOB data: the 192-bit hash/randomizer are
 * recorded and the 256-bit fields are zeroed to mark them absent.
 */
3201 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3202 u8 *hash, u8 *randomizer)
3204 struct oob_data *data;
3206 data = hci_find_remote_oob_data(hdev, bdaddr);
3208 data = kmalloc(sizeof(*data), GFP_KERNEL);
3212 bacpy(&data->bdaddr, bdaddr);
3213 list_add(&data->list, &hdev->remote_oob_data);
3216 memcpy(data->hash192, hash, sizeof(data->hash192));
3217 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3219 memset(data->hash256, 0, sizeof(data->hash256));
3220 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3222 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Store extended OOB data carrying both P-192 and P-256 values. */
3227 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3228 u8 *hash192, u8 *randomizer192,
3229 u8 *hash256, u8 *randomizer256)
3231 struct oob_data *data;
3233 data = hci_find_remote_oob_data(hdev, bdaddr);
3235 data = kmalloc(sizeof(*data), GFP_KERNEL);
3239 bacpy(&data->bdaddr, bdaddr);
3240 list_add(&data->list, &hdev->remote_oob_data);
3243 memcpy(data->hash192, hash192, sizeof(data->hash192));
3244 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3246 memcpy(data->hash256, hash256, sizeof(data->hash256));
3247 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3249 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Look up bdaddr/type on the reject ("blacklist") list. */
3254 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3255 bdaddr_t *bdaddr, u8 type)
3257 struct bdaddr_list *b;
3259 list_for_each_entry(b, &hdev->blacklist, list) {
3260 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Drop every entry from the reject list. */
3267 static void hci_blacklist_clear(struct hci_dev *hdev)
3269 struct list_head *p, *n;
3271 list_for_each_safe(p, n, &hdev->blacklist) {
3272 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add bdaddr/type to the reject list.  BDADDR_ANY is refused and
 * duplicates are rejected; success is reported to mgmt.
 * NOTE(review): the error returns for those two guards are missing from
 * this extraction -- confirm against the full source.
 */
3279 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3281 struct bdaddr_list *entry;
3283 if (!bacmp(bdaddr, BDADDR_ANY))
3286 if (hci_blacklist_lookup(hdev, bdaddr, type))
3289 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3293 bacpy(&entry->bdaddr, bdaddr);
3294 entry->bdaddr_type = type;
3296 list_add(&entry->list, &hdev->blacklist);
3298 return mgmt_device_blocked(hdev, bdaddr, type);
/* Remove one entry from the reject list; BDADDR_ANY clears all. */
3301 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3303 struct bdaddr_list *entry;
3305 if (!bacmp(bdaddr, BDADDR_ANY)) {
3306 hci_blacklist_clear(hdev);
3310 entry = hci_blacklist_lookup(hdev, bdaddr, type);
3314 list_del(&entry->list);
3317 return mgmt_device_unblocked(hdev, bdaddr, type);
/* Look up bdaddr/type on the LE accept ("white") list. */
3320 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3321 bdaddr_t *bdaddr, u8 type)
3323 struct bdaddr_list *b;
3325 list_for_each_entry(b, &hdev->le_white_list, list) {
3326 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Drop every entry from the LE accept list. */
3333 void hci_white_list_clear(struct hci_dev *hdev)
3335 struct list_head *p, *n;
3337 list_for_each_safe(p, n, &hdev->le_white_list) {
3338 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add bdaddr/type to the LE accept list (BDADDR_ANY refused).  Unlike
 * the blacklist variant there is no visible duplicate check here.
 */
3345 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3347 struct bdaddr_list *entry;
3349 if (!bacmp(bdaddr, BDADDR_ANY))
3352 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3356 bacpy(&entry->bdaddr, bdaddr);
3357 entry->bdaddr_type = type;
3359 list_add(&entry->list, &hdev->le_white_list);
/* Remove one entry from the LE accept list (BDADDR_ANY refused). */
3364 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3366 struct bdaddr_list *entry;
3368 if (!bacmp(bdaddr, BDADDR_ANY))
3371 entry = hci_white_list_lookup(hdev, bdaddr, type);
3375 list_del(&entry->list);
3381 /* This function requires the caller holds hdev->lock */
/* Look up stored LE connection parameters for addr/addr_type. */
3382 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3383 bdaddr_t *addr, u8 addr_type)
3385 struct hci_conn_params *params;
3387 list_for_each_entry(params, &hdev->le_conn_params, list) {
3388 if (bacmp(&params->addr, addr) == 0 &&
3389 params->addr_type == addr_type) {
/* True when an LE link to addr of the given type is fully connected. */
3397 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3399 struct hci_conn *conn;
3401 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3405 if (conn->dst_type != type)
3408 if (conn->state != BT_CONNECTED)
/* True for public addresses and static random addresses (top two
 * bits set) -- the only forms valid as an identity address.
 */
3414 static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3416 if (addr_type == ADDR_LE_DEV_PUBLIC)
3419 /* Check for Random Static address type */
3420 if ((addr->b[5] & 0xc0) == 0xc0)
3426 /* This function requires the caller holds hdev->lock */
/* Add (or update) LE connection parameters for addr.  Only identity
 * addresses are accepted.  Depending on auto_connect the address is
 * also added to or removed from the pending-LE-connections list.
 */
3427 int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3428 u8 auto_connect, u16 conn_min_interval,
3429 u16 conn_max_interval)
3431 struct hci_conn_params *params;
3433 if (!is_identity_address(addr, addr_type))
3436 params = hci_conn_params_lookup(hdev, addr, addr_type);
3440 params = kzalloc(sizeof(*params), GFP_KERNEL);
3442 BT_ERR("Out of memory");
3446 bacpy(&params->addr, addr);
3447 params->addr_type = addr_type;
3449 list_add(&params->list, &hdev->le_conn_params);
3452 params->conn_min_interval = conn_min_interval;
3453 params->conn_max_interval = conn_max_interval;
3454 params->auto_connect = auto_connect;
3456 switch (auto_connect) {
3457 case HCI_AUTO_CONN_DISABLED:
3458 case HCI_AUTO_CONN_LINK_LOSS:
3459 hci_pend_le_conn_del(hdev, addr, addr_type);
3461 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection when not already connected. */
3462 if (!is_connected(hdev, addr, addr_type))
3463 hci_pend_le_conn_add(hdev, addr, addr_type);
3467 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3468 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3469 conn_min_interval, conn_max_interval);
3474 /* This function requires the caller holds hdev->lock */
/* Remove stored LE connection parameters and any pending connection. */
3475 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3477 struct hci_conn_params *params;
3479 params = hci_conn_params_lookup(hdev, addr, addr_type);
3483 hci_pend_le_conn_del(hdev, addr, addr_type);
3485 list_del(&params->list);
3488 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3491 /* This function requires the caller holds hdev->lock */
/* Drop every stored LE connection parameter entry. */
3492 void hci_conn_params_clear(struct hci_dev *hdev)
3494 struct hci_conn_params *params, *tmp;
3496 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3497 list_del(&params->list);
3501 BT_DBG("All LE connection parameters were removed");
3504 /* This function requires the caller holds hdev->lock */
/* Look up addr/addr_type on the pending-LE-connections list. */
3505 struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3506 bdaddr_t *addr, u8 addr_type)
3508 struct bdaddr_list *entry;
3510 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3511 if (bacmp(&entry->bdaddr, addr) == 0 &&
3512 entry->bdaddr_type == addr_type)
3519 /* This function requires the caller holds hdev->lock */
/* Queue addr for a background LE connection (no-op if already queued)
 * and re-evaluate background scanning.
 */
3520 void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3522 struct bdaddr_list *entry;
3524 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3528 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3530 BT_ERR("Out of memory");
3534 bacpy(&entry->bdaddr, addr);
3535 entry->bdaddr_type = addr_type;
3537 list_add(&entry->list, &hdev->pend_le_conns);
3539 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3542 hci_update_background_scan(hdev);
3545 /* This function requires the caller holds hdev->lock */
/* Remove addr from the pending-LE-connections list and re-evaluate
 * background scanning.
 */
3546 void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3548 struct bdaddr_list *entry;
3550 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3554 list_del(&entry->list);
3557 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3560 hci_update_background_scan(hdev);
3563 /* This function requires the caller holds hdev->lock */
/* Drop every pending LE connection entry. */
3564 void hci_pend_le_conns_clear(struct hci_dev *hdev)
3566 struct bdaddr_list *entry, *tmp;
3568 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3569 list_del(&entry->list);
3573 BT_DBG("All LE pending connections cleared");
/* Request-complete callback for the interleaved inquiry: on failure the
 * discovery state machine is moved back to DISCOVERY_STOPPED.
 */
3576 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3579 BT_ERR("Failed to start inquiry: status %d", status);
3582 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3583 hci_dev_unlock(hdev);
/* Runs after LE scanning was disabled.  For LE-only discovery the state
 * machine stops; for interleaved discovery a classic inquiry (GIAC) is
 * issued next, after flushing the inquiry cache.
 */
3588 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3590 /* General inquiry access code (GIAC) */
3591 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3592 struct hci_request req;
3593 struct hci_cp_inquiry cp;
3597 BT_ERR("Failed to disable LE scanning: status %d", status);
3601 switch (hdev->discovery.type) {
3602 case DISCOV_TYPE_LE:
3604 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3605 hci_dev_unlock(hdev);
3608 case DISCOV_TYPE_INTERLEAVED:
3609 hci_req_init(&req, hdev);
3611 memset(&cp, 0, sizeof(cp));
3612 memcpy(&cp.lap, lap, sizeof(cp.lap));
3613 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3614 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3618 hci_inquiry_cache_flush(hdev);
3620 err = hci_req_run(&req, inquiry_complete);
3622 BT_ERR("Inquiry request failed: err %d", err);
3623 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3626 hci_dev_unlock(hdev);
/* Delayed work that submits an HCI request to disable LE scanning;
 * completion is handled by le_scan_disable_work_complete().
 */
3631 static void le_scan_disable_work(struct work_struct *work)
3633 struct hci_dev *hdev = container_of(work, struct hci_dev,
3634 le_scan_disable.work);
3635 struct hci_request req;
3638 BT_DBG("%s", hdev->name);
3640 hci_req_init(&req, hdev);
3642 hci_req_add_le_scan_disable(&req);
3644 err = hci_req_run(&req, le_scan_disable_work_complete);
3646 BT_ERR("Disable LE scanning request failed: err %d", err);
/* Queue an HCI command setting the LE random address, unless the device
 * is advertising or has an LE connection attempt in flight -- in that
 * case the update is deferred (see comment below).
 */
3649 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3651 struct hci_dev *hdev = req->hdev;
3653 /* If we're advertising or initiating an LE connection we can't
3654 * go ahead and change the random address at this time. This is
3655 * because the eventual initiator address used for the
3656 * subsequently created connection will be undefined (some
3657 * controllers use the new address and others the one we had
3658 * when the operation started).
3660 * In this kind of scenario skip the update and let the random
3661 * address be updated at the next cycle.
3663 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3664 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3665 BT_DBG("Deferring random address update")
3669 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Choose the own-address type for an LE operation and, when needed,
 * queue the command updating the controller's random address.
 * Priority order visible below: RPA when privacy is enabled, an
 * unresolvable private address when privacy is required without an RPA,
 * static address when forced or no public address exists, otherwise the
 * public address.
 */
3672 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3675 struct hci_dev *hdev = req->hdev;
3678 /* If privacy is enabled use a resolvable private address. If
3679 * current RPA has expired or there is something else than
3680 * the current RPA in use, then generate a new one.
3682 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3685 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* Keep the current RPA while it is unexpired and still in use. */
3687 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3688 !bacmp(&hdev->random_addr, &hdev->rpa))
3691 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3693 BT_ERR("%s failed to generate new RPA", hdev->name);
3697 set_random_addr(req, &hdev->rpa);
/* Re-arm the RPA rotation timer. */
3699 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3700 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3705 /* In case of required privacy without resolvable private address,
3706 * use an unresolvable private address. This is useful for active
3707 * scanning and non-connectable advertising.
3709 if (require_privacy) {
3712 get_random_bytes(&urpa, 6);
3713 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3715 *own_addr_type = ADDR_LE_DEV_RANDOM;
3716 set_random_addr(req, &urpa);
3720 /* If forcing static address is in use or there is no public
3721 * address use the static address as random address (but skip
3722 * the HCI command if the current random address is already the
3725 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3726 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3727 *own_addr_type = ADDR_LE_DEV_RANDOM;
3728 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3729 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3730 &hdev->static_addr);
3734 /* Neither privacy nor static address is being used so use a
3737 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3742 /* Copy the Identity Address of the controller.
3744 * If the controller has a public BD_ADDR, then by default use that one.
3745 * If this is a LE only controller without a public address, default to
3746 * the static random address.
3748 * For debugging purposes it is possible to force controllers with a
3749 * public address to use the static random address instead.
3751 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3754 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3755 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3756 bacpy(bdaddr, &hdev->static_addr);
3757 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3759 bacpy(bdaddr, &hdev->bdaddr);
3760 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3764 /* Alloc HCI device */
/* Allocate and initialize a struct hci_dev: default packet types and
 * radio parameters, all key/parameter lists, the rx/cmd/tx work items,
 * delayed works, queues and the command timeout timer.
 */
3765 struct hci_dev *hci_alloc_dev(void)
3767 struct hci_dev *hdev;
3769 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3773 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3774 hdev->esco_type = (ESCO_HV1);
3775 hdev->link_mode = (HCI_LM_ACCEPT);
3776 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3777 hdev->io_capability = 0x03; /* No Input No Output */
3778 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3779 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
/* Sniff intervals are in slots of 0.625 ms. */
3781 hdev->sniff_max_interval = 800;
3782 hdev->sniff_min_interval = 80;
/* LE defaults: all three advertising channels, scan/connection timing. */
3784 hdev->le_adv_channel_map = 0x07;
3785 hdev->le_scan_interval = 0x0060;
3786 hdev->le_scan_window = 0x0030;
3787 hdev->le_conn_min_interval = 0x0028;
3788 hdev->le_conn_max_interval = 0x0038;
3790 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3791 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3793 mutex_init(&hdev->lock);
3794 mutex_init(&hdev->req_lock);
3796 INIT_LIST_HEAD(&hdev->mgmt_pending);
3797 INIT_LIST_HEAD(&hdev->blacklist);
3798 INIT_LIST_HEAD(&hdev->uuids);
3799 INIT_LIST_HEAD(&hdev->link_keys);
3800 INIT_LIST_HEAD(&hdev->long_term_keys);
3801 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3802 INIT_LIST_HEAD(&hdev->remote_oob_data);
3803 INIT_LIST_HEAD(&hdev->le_white_list);
3804 INIT_LIST_HEAD(&hdev->le_conn_params);
3805 INIT_LIST_HEAD(&hdev->pend_le_conns);
3806 INIT_LIST_HEAD(&hdev->conn_hash.list);
3808 INIT_WORK(&hdev->rx_work, hci_rx_work);
3809 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3810 INIT_WORK(&hdev->tx_work, hci_tx_work);
3811 INIT_WORK(&hdev->power_on, hci_power_on);
3813 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3814 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3815 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3817 skb_queue_head_init(&hdev->rx_q);
3818 skb_queue_head_init(&hdev->cmd_q);
3819 skb_queue_head_init(&hdev->raw_q);
3821 init_waitqueue_head(&hdev->req_wait_q);
3823 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
3825 hci_init_sysfs(hdev);
3826 discovery_init(hdev);
3830 EXPORT_SYMBOL(hci_alloc_dev);
3832 /* Free HCI device */
/* Drop the device reference; the actual free happens in the device
 * release callback.
 */
3833 void hci_free_dev(struct hci_dev *hdev)
3835 /* will free via device release */
3836 put_device(&hdev->dev);
3838 EXPORT_SYMBOL(hci_free_dev);
3840 /* Register HCI device */
/* Register a driver-provided hci_dev: allocate an index (AMP devices
 * never get index 0), create the work queues and debugfs dir, set up the
 * AES crypto context, add the device and its rfkill switch, then queue
 * the initial power-on.  Error paths unwind in reverse order.
 * NOTE(review): several error-path labels/returns are missing from this
 * extraction.
 */
3841 int hci_register_dev(struct hci_dev *hdev)
3845 if (!hdev->open || !hdev->close)
3848 /* Do not allow HCI_AMP devices to register at index 0,
3849 * so the index can be used as the AMP controller ID.
3851 switch (hdev->dev_type) {
3853 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3856 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3865 sprintf(hdev->name, "hci%d", id);
3868 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3870 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3871 WQ_MEM_RECLAIM, 1, hdev->name);
3872 if (!hdev->workqueue) {
3877 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3878 WQ_MEM_RECLAIM, 1, hdev->name);
3879 if (!hdev->req_workqueue) {
3880 destroy_workqueue(hdev->workqueue);
3885 if (!IS_ERR_OR_NULL(bt_debugfs))
3886 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3888 dev_set_name(&hdev->dev, "%s", hdev->name);
3890 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3892 if (IS_ERR(hdev->tfm_aes)) {
3893 BT_ERR("Unable to create crypto context");
3894 error = PTR_ERR(hdev->tfm_aes);
3895 hdev->tfm_aes = NULL;
3899 error = device_add(&hdev->dev);
3903 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3904 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3907 if (rfkill_register(hdev->rfkill) < 0) {
3908 rfkill_destroy(hdev->rfkill);
3909 hdev->rfkill = NULL;
3913 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3914 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3916 set_bit(HCI_SETUP, &hdev->dev_flags);
3917 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3919 if (hdev->dev_type == HCI_BREDR) {
3920 /* Assume BR/EDR support until proven otherwise (such as
3921 * through reading supported features during init.
3923 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3926 write_lock(&hci_dev_list_lock);
3927 list_add(&hdev->list, &hci_dev_list);
3928 write_unlock(&hci_dev_list_lock);
3930 hci_notify(hdev, HCI_DEV_REG);
3933 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwinding: free crypto, work queues and the allocated index. */
3938 crypto_free_blkcipher(hdev->tfm_aes);
3940 destroy_workqueue(hdev->workqueue);
3941 destroy_workqueue(hdev->req_workqueue);
3943 ida_simple_remove(&hci_index_ida, hdev->id);
3947 EXPORT_SYMBOL(hci_register_dev);
3949 /* Unregister HCI device */
/* Tear down a registered hci_dev: unlink from the global list, close it,
 * free reassembly buffers, notify mgmt, remove rfkill/sysfs/debugfs,
 * clear all stored keys and lists, and release the index.
 */
3950 void hci_unregister_dev(struct hci_dev *hdev)
3954 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3956 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3960 write_lock(&hci_dev_list_lock);
3961 list_del(&hdev->list);
3962 write_unlock(&hci_dev_list_lock);
3964 hci_dev_do_close(hdev);
3966 for (i = 0; i < NUM_REASSEMBLY; i++)
3967 kfree_skb(hdev->reassembly[i]);
3969 cancel_work_sync(&hdev->power_on);
/* Only announce removal to mgmt when the device finished setup. */
3971 if (!test_bit(HCI_INIT, &hdev->flags) &&
3972 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
3974 mgmt_index_removed(hdev);
3975 hci_dev_unlock(hdev);
3978 /* mgmt_index_removed should take care of emptying the
3980 BUG_ON(!list_empty(&hdev->mgmt_pending));
3982 hci_notify(hdev, HCI_DEV_UNREG);
3985 rfkill_unregister(hdev->rfkill);
3986 rfkill_destroy(hdev->rfkill);
3990 crypto_free_blkcipher(hdev->tfm_aes);
3992 device_del(&hdev->dev);
3994 debugfs_remove_recursive(hdev->debugfs);
3996 destroy_workqueue(hdev->workqueue);
3997 destroy_workqueue(hdev->req_workqueue);
4000 hci_blacklist_clear(hdev);
4001 hci_uuids_clear(hdev);
4002 hci_link_keys_clear(hdev);
4003 hci_smp_ltks_clear(hdev);
4004 hci_smp_irks_clear(hdev);
4005 hci_remote_oob_data_clear(hdev);
4006 hci_white_list_clear(hdev);
4007 hci_conn_params_clear(hdev);
4008 hci_pend_le_conns_clear(hdev);
4009 hci_dev_unlock(hdev);
4013 ida_simple_remove(&hci_index_ida, id);
4015 EXPORT_SYMBOL(hci_unregister_dev);
4017 /* Suspend HCI device */
/* Notify listeners (HCI sockets) that the device is suspending. */
4018 int hci_suspend_dev(struct hci_dev *hdev)
4020 hci_notify(hdev, HCI_DEV_SUSPEND);
4023 EXPORT_SYMBOL(hci_suspend_dev);
4025 /* Resume HCI device */
/* Notify listeners that the device resumed. */
4026 int hci_resume_dev(struct hci_dev *hdev)
4028 hci_notify(hdev, HCI_DEV_RESUME);
4031 EXPORT_SYMBOL(hci_resume_dev);
4033 /* Receive frame from HCI drivers */
/* Entry point for complete frames from the transport driver: rejected
 * unless the device is up or initializing; otherwise the skb is marked
 * incoming, timestamped, queued on rx_q and the RX work is kicked.
 */
4034 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4036 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4037 && !test_bit(HCI_INIT, &hdev->flags))) {
4043 bt_cb(skb)->incoming = 1;
4046 __net_timestamp(skb);
4048 skb_queue_tail(&hdev->rx_q, skb);
4049 queue_work(hdev->workqueue, &hdev->rx_work);
4053 EXPORT_SYMBOL(hci_recv_frame);
/* Incrementally reassemble one HCI packet of the given type from raw
 * transport bytes, using hdev->reassembly[index] as the partial-frame
 * buffer.  Allocates the skb on first fragment, grows scb->expect from
 * the per-type header length field, and hands the finished frame to
 * hci_recv_frame().  NOTE(review): the return value convention (bytes
 * remaining vs. error) is not fully visible in this extraction.
 */
4055 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4056 int count, __u8 index)
4061 struct sk_buff *skb;
4062 struct bt_skb_cb *scb;
4064 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4065 index >= NUM_REASSEMBLY)
4068 skb = hdev->reassembly[index];
/* Per-type maximum frame size and header length. */
4072 case HCI_ACLDATA_PKT:
4073 len = HCI_MAX_FRAME_SIZE;
4074 hlen = HCI_ACL_HDR_SIZE;
4077 len = HCI_MAX_EVENT_SIZE;
4078 hlen = HCI_EVENT_HDR_SIZE;
4080 case HCI_SCODATA_PKT:
4081 len = HCI_MAX_SCO_SIZE;
4082 hlen = HCI_SCO_HDR_SIZE;
4086 skb = bt_skb_alloc(len, GFP_ATOMIC);
4090 scb = (void *) skb->cb;
4092 scb->pkt_type = type;
4094 hdev->reassembly[index] = skb;
4098 scb = (void *) skb->cb;
4099 len = min_t(uint, scb->expect, count);
4101 memcpy(skb_put(skb, len), data, len);
/* Once the header is complete, learn the payload length from it and
 * abort if it would not fit in the allocated buffer.
 */
4110 if (skb->len == HCI_EVENT_HDR_SIZE) {
4111 struct hci_event_hdr *h = hci_event_hdr(skb);
4112 scb->expect = h->plen;
4114 if (skb_tailroom(skb) < scb->expect) {
4116 hdev->reassembly[index] = NULL;
4122 case HCI_ACLDATA_PKT:
4123 if (skb->len == HCI_ACL_HDR_SIZE) {
4124 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4125 scb->expect = __le16_to_cpu(h->dlen);
4127 if (skb_tailroom(skb) < scb->expect) {
4129 hdev->reassembly[index] = NULL;
4135 case HCI_SCODATA_PKT:
4136 if (skb->len == HCI_SCO_HDR_SIZE) {
4137 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4138 scb->expect = h->dlen;
4140 if (skb_tailroom(skb) < scb->expect) {
4142 hdev->reassembly[index] = NULL;
4149 if (scb->expect == 0) {
4150 /* Complete frame */
4152 bt_cb(skb)->pkt_type = type;
4153 hci_recv_frame(hdev, skb);
4155 hdev->reassembly[index] = NULL;
/* Feed driver bytes of a known packet type into the per-type
 * reassembly slot (type - 1) until the buffer is consumed.
 */
4163 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4167 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4171 rem = hci_reassembly(hdev, type, data, count, type - 1);
4175 data += (count - rem);
4181 EXPORT_SYMBOL(hci_recv_fragment);
4183 #define STREAM_REASSEMBLY 0
/* Feed a raw byte stream (type byte inline before each packet) into the
 * single stream-reassembly slot.
 */
4185 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4191 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4194 struct { char type; } *pkt;
4196 /* Start of the frame */
4203 type = bt_cb(skb)->pkt_type;
4205 rem = hci_reassembly(hdev, type, data, count,
4210 data += (count - rem);
4216 EXPORT_SYMBOL(hci_recv_stream_fragment);
4218 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback set under the cb list write lock. */
4220 int hci_register_cb(struct hci_cb *cb)
4222 BT_DBG("%p name %s", cb, cb->name);
4224 write_lock(&hci_cb_list_lock);
4225 list_add(&cb->list, &hci_cb_list);
4226 write_unlock(&hci_cb_list_lock);
4230 EXPORT_SYMBOL(hci_register_cb);
/* Unregister a previously registered callback set. */
4232 int hci_unregister_cb(struct hci_cb *cb)
4234 BT_DBG("%p name %s", cb, cb->name);
4236 write_lock(&hci_cb_list_lock);
4237 list_del(&cb->list);
4238 write_unlock(&hci_cb_list_lock);
4242 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing frame to the driver: timestamp it, mirror it to the
 * monitor socket (and to raw sockets in promiscuous mode), then call the
 * driver's send hook.
 */
4244 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4246 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4249 __net_timestamp(skb);
4251 /* Send copy to monitor */
4252 hci_send_to_monitor(hdev, skb);
4254 if (atomic_read(&hdev->promisc)) {
4255 /* Send copy to the sockets */
4256 hci_send_to_sock(hdev, skb);
4259 /* Get rid of skb owner, prior to sending to the driver. */
4262 if (hdev->send(hdev, skb) < 0)
4263 BT_ERR("%s sending frame failed", hdev->name);
/* Initialize an HCI request: empty command queue bound to hdev. */
4266 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4268 skb_queue_head_init(&req->cmd_q);
/* Submit a built request: tag the last queued command with the
 * completion callback, splice the request's commands onto hdev->cmd_q
 * under its lock and kick the command work.  A build error purges the
 * queue; empty requests are rejected.
 */
4273 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4275 struct hci_dev *hdev = req->hdev;
4276 struct sk_buff *skb;
4277 unsigned long flags;
4279 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4281 /* If an error occured during request building, remove all HCI
4282 * commands queued on the HCI request queue.
4285 skb_queue_purge(&req->cmd_q);
4289 /* Do not allow empty requests */
4290 if (skb_queue_empty(&req->cmd_q))
4293 skb = skb_peek_tail(&req->cmd_q);
4294 bt_cb(skb)->req.complete = complete;
4296 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4297 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4298 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4300 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Build an skb carrying one HCI command: header (opcode + plen) then
 * the parameter block; marked as HCI_COMMAND_PKT.
 */
4305 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4306 u32 plen, const void *param)
4308 int len = HCI_COMMAND_HDR_SIZE + plen;
4309 struct hci_command_hdr *hdr;
4310 struct sk_buff *skb;
4312 skb = bt_skb_alloc(len, GFP_ATOMIC);
4316 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4317 hdr->opcode = cpu_to_le16(opcode);
4321 memcpy(skb_put(skb, plen), param, plen);
4323 BT_DBG("skb len %d", skb->len);
4325 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4330 /* Send HCI command */
/* Queue a stand-alone HCI command (its own single-command request). */
4331 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4334 struct sk_buff *skb;
4336 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4338 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4340 BT_ERR("%s no memory for command", hdev->name);
4344 /* Stand-alone HCI commands must be flaged as
4345 * single-command requests.
4347 bt_cb(skb)->req.start = true;
4349 skb_queue_tail(&hdev->cmd_q, skb);
4350 queue_work(hdev->workqueue, &hdev->cmd_work);
4355 /* Queue a command to an asynchronous HCI request */
/* Like hci_req_add() but also records the event opcode that signals
 * completion of this command.  The first command in a request is marked
 * as the request start.  Build failures are remembered, not returned.
 */
4356 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4357 const void *param, u8 event)
4359 struct hci_dev *hdev = req->hdev;
4360 struct sk_buff *skb;
4362 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4364 /* If an error occured during request building, there is no point in
4365 * queueing the HCI command. We can simply return.
4370 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4372 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4373 hdev->name, opcode);
4378 if (skb_queue_empty(&req->cmd_q))
4379 bt_cb(skb)->req.start = true;
4381 bt_cb(skb)->req.event = event;
4383 skb_queue_tail(&req->cmd_q, skb);
/* Convenience wrapper: queue a command with no completion event. */
4386 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4389 hci_req_add_ev(req, opcode, plen, param, 0);
4392 /* Get data from the previously sent command */
/* Return a pointer to the parameter block of the last sent command,
 * or NULL when none is pending or the opcode does not match.
 */
4393 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4395 struct hci_command_hdr *hdr;
4397 if (!hdev->sent_cmd)
4400 hdr = (void *) hdev->sent_cmd->data;
4402 if (hdr->opcode != cpu_to_le16(opcode))
4405 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4407 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header (handle+flags, data length) to the skb. */
4411 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4413 struct hci_acl_hdr *hdr;
4416 skb_push(skb, HCI_ACL_HDR_SIZE);
4417 skb_reset_transport_header(skb);
4418 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4419 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4420 hdr->dlen = cpu_to_le16(len);
/* Queue an ACL frame (possibly pre-fragmented via skb frag_list) on the
 * channel's data queue.  The handle used depends on the device type
 * (connection handle vs. channel handle for AMP); continuation
 * fragments drop the ACL_START flag and all fragments are queued
 * atomically under the queue lock.
 */
4423 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4424 struct sk_buff *skb, __u16 flags)
4426 struct hci_conn *conn = chan->conn;
4427 struct hci_dev *hdev = conn->hdev;
4428 struct sk_buff *list;
4430 skb->len = skb_headlen(skb);
4433 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4435 switch (hdev->dev_type) {
4437 hci_add_acl_hdr(skb, conn->handle, flags);
4440 hci_add_acl_hdr(skb, chan->handle, flags);
4443 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4447 list = skb_shinfo(skb)->frag_list;
4449 /* Non fragmented */
4450 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4452 skb_queue_tail(queue, skb);
4455 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4457 skb_shinfo(skb)->frag_list = NULL;
4459 /* Queue all fragments atomically */
4460 spin_lock(&queue->lock);
4462 __skb_queue_tail(queue, skb);
4464 flags &= ~ACL_START;
4467 skb = list; list = list->next;
4469 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4470 hci_add_acl_hdr(skb, conn->handle, flags);
4472 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4474 __skb_queue_tail(queue, skb);
4477 spin_unlock(&queue->lock);
/* Public ACL send: queue the frame on the channel and kick TX work. */
4481 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4483 struct hci_dev *hdev = chan->conn->hdev;
4485 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4487 hci_queue_acl(chan, &chan->data_q, skb, flags);
4489 queue_work(hdev->workqueue, &hdev->tx_work);
/* SCO send: prepend the SCO header (handle, length) and queue the frame
 * on the connection's data queue, then kick TX work.
 */
4493 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4495 struct hci_dev *hdev = conn->hdev;
4496 struct hci_sco_hdr hdr;
4498 BT_DBG("%s len %d", hdev->name, skb->len);
4500 hdr.handle = cpu_to_le16(conn->handle);
4501 hdr.dlen = skb->len;
4503 skb_push(skb, HCI_SCO_HDR_SIZE);
4504 skb_reset_transport_header(skb);
4505 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4507 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4509 skb_queue_tail(&conn->data_q, skb);
4510 queue_work(hdev->workqueue, &hdev->tx_work);
4513 /* ---- HCI TX task (outgoing data) ---- */
4515 /* HCI Connection scheduler */
/* Pick the connection of the given link type with queued data and the
 * fewest in-flight packets (fairness), and compute its send quota from
 * the per-type controller buffer credits.
 */
4516 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4519 struct hci_conn_hash *h = &hdev->conn_hash;
4520 struct hci_conn *conn = NULL, *c;
4521 unsigned int num = 0, min = ~0;
4523 /* We don't have to lock device here. Connections are always
4524 * added and removed with TX task disabled. */
4528 list_for_each_entry_rcu(c, &h->list, list) {
4529 if (c->type != type || skb_queue_empty(&c->data_q))
4532 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4537 if (c->sent < min) {
/* Early exit once every connection of this type has been seen. */
4542 if (hci_conn_num(hdev, type) == num)
4551 switch (conn->type) {
4553 cnt = hdev->acl_cnt;
4557 cnt = hdev->sco_cnt;
/* LE shares the ACL credit pool when no dedicated LE buffers exist. */
4560 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4564 BT_ERR("Unknown link type");
4572 BT_DBG("conn %p quote %d", conn, *quote);
/* Link supervision: on TX timeout, disconnect every connection of this
 * type that still has unacknowledged packets.
 */
4576 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4578 struct hci_conn_hash *h = &hdev->conn_hash;
4581 BT_ERR("%s link tx timeout", hdev->name);
4585 /* Kill stalled connections */
4586 list_for_each_entry_rcu(c, &h->list, list) {
4587 if (c->type == type && c->sent) {
4588 BT_ERR("%s killing stalled connection %pMR",
4589 hdev->name, &c->dst);
4590 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: among all channels of connections of the
 * given type, prefer the highest skb priority at the head of a data
 * queue, then the connection with fewest in-flight packets; returns the
 * chosen channel and its quota from the per-type credit pool.
 */
4597 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4600 struct hci_conn_hash *h = &hdev->conn_hash;
4601 struct hci_chan *chan = NULL;
4602 unsigned int num = 0, min = ~0, cur_prio = 0;
4603 struct hci_conn *conn;
4604 int cnt, q, conn_num = 0;
4606 BT_DBG("%s", hdev->name);
4610 list_for_each_entry_rcu(conn, &h->list, list) {
4611 struct hci_chan *tmp;
4613 if (conn->type != type)
4616 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4621 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4622 struct sk_buff *skb;
4624 if (skb_queue_empty(&tmp->data_q))
4627 skb = skb_peek(&tmp->data_q);
4628 if (skb->priority < cur_prio)
/* A strictly higher priority resets the fairness minimum. */
4631 if (skb->priority > cur_prio) {
4634 cur_prio = skb->priority;
4639 if (conn->sent < min) {
4645 if (hci_conn_num(hdev, type) == conn_num)
4654 switch (chan->conn->type) {
4656 cnt = hdev->acl_cnt;
/* Block-based flow control uses the block credit pool. */
4659 cnt = hdev->block_cnt;
4663 cnt = hdev->sco_cnt;
4666 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4670 BT_ERR("Unknown link type");
4675 BT_DBG("chan %p quote %d", chan, *quote);
4679 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4681 struct hci_conn_hash *h = &hdev->conn_hash;
4682 struct hci_conn *conn;
4685 BT_DBG("%s", hdev->name);
4689 list_for_each_entry_rcu(conn, &h->list, list) {
4690 struct hci_chan *chan;
4692 if (conn->type != type)
4695 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4700 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4701 struct sk_buff *skb;
4708 if (skb_queue_empty(&chan->data_q))
4711 skb = skb_peek(&chan->data_q);
4712 if (skb->priority >= HCI_PRIO_MAX - 1)
4715 skb->priority = HCI_PRIO_MAX - 1;
4717 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4721 if (hci_conn_num(hdev, type) == num)
4729 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4731 /* Calculate count of blocks used by this packet */
4732 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4735 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4737 if (!test_bit(HCI_RAW, &hdev->flags)) {
4738 /* ACL tx timeout must be longer than maximum
4739 * link supervision timeout (40.9 seconds) */
4740 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4741 HCI_ACL_TX_TIMEOUT))
4742 hci_link_tx_to(hdev, ACL_LINK);
4746 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4748 unsigned int cnt = hdev->acl_cnt;
4749 struct hci_chan *chan;
4750 struct sk_buff *skb;
4753 __check_timeout(hdev, cnt);
4755 while (hdev->acl_cnt &&
4756 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
4757 u32 priority = (skb_peek(&chan->data_q))->priority;
4758 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4759 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4760 skb->len, skb->priority);
4762 /* Stop if priority has changed */
4763 if (skb->priority < priority)
4766 skb = skb_dequeue(&chan->data_q);
4768 hci_conn_enter_active_mode(chan->conn,
4769 bt_cb(skb)->force_active);
4771 hci_send_frame(hdev, skb);
4772 hdev->acl_last_tx = jiffies;
4780 if (cnt != hdev->acl_cnt)
4781 hci_prio_recalculate(hdev, ACL_LINK);
4784 static void hci_sched_acl_blk(struct hci_dev *hdev)
4786 unsigned int cnt = hdev->block_cnt;
4787 struct hci_chan *chan;
4788 struct sk_buff *skb;
4792 __check_timeout(hdev, cnt);
4794 BT_DBG("%s", hdev->name);
4796 if (hdev->dev_type == HCI_AMP)
4801 while (hdev->block_cnt > 0 &&
4802 (chan = hci_chan_sent(hdev, type, "e))) {
4803 u32 priority = (skb_peek(&chan->data_q))->priority;
4804 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4807 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4808 skb->len, skb->priority);
4810 /* Stop if priority has changed */
4811 if (skb->priority < priority)
4814 skb = skb_dequeue(&chan->data_q);
4816 blocks = __get_blocks(hdev, skb);
4817 if (blocks > hdev->block_cnt)
4820 hci_conn_enter_active_mode(chan->conn,
4821 bt_cb(skb)->force_active);
4823 hci_send_frame(hdev, skb);
4824 hdev->acl_last_tx = jiffies;
4826 hdev->block_cnt -= blocks;
4829 chan->sent += blocks;
4830 chan->conn->sent += blocks;
4834 if (cnt != hdev->block_cnt)
4835 hci_prio_recalculate(hdev, type);
4838 static void hci_sched_acl(struct hci_dev *hdev)
4840 BT_DBG("%s", hdev->name);
4842 /* No ACL link over BR/EDR controller */
4843 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4846 /* No AMP link over AMP controller */
4847 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4850 switch (hdev->flow_ctl_mode) {
4851 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4852 hci_sched_acl_pkt(hdev);
4855 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4856 hci_sched_acl_blk(hdev);
4862 static void hci_sched_sco(struct hci_dev *hdev)
4864 struct hci_conn *conn;
4865 struct sk_buff *skb;
4868 BT_DBG("%s", hdev->name);
4870 if (!hci_conn_num(hdev, SCO_LINK))
4873 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
4874 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4875 BT_DBG("skb %p len %d", skb, skb->len);
4876 hci_send_frame(hdev, skb);
4879 if (conn->sent == ~0)
4885 static void hci_sched_esco(struct hci_dev *hdev)
4887 struct hci_conn *conn;
4888 struct sk_buff *skb;
4891 BT_DBG("%s", hdev->name);
4893 if (!hci_conn_num(hdev, ESCO_LINK))
4896 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4898 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4899 BT_DBG("skb %p len %d", skb, skb->len);
4900 hci_send_frame(hdev, skb);
4903 if (conn->sent == ~0)
4909 static void hci_sched_le(struct hci_dev *hdev)
4911 struct hci_chan *chan;
4912 struct sk_buff *skb;
4913 int quote, cnt, tmp;
4915 BT_DBG("%s", hdev->name);
4917 if (!hci_conn_num(hdev, LE_LINK))
4920 if (!test_bit(HCI_RAW, &hdev->flags)) {
4921 /* LE tx timeout must be longer than maximum
4922 * link supervision timeout (40.9 seconds) */
4923 if (!hdev->le_cnt && hdev->le_pkts &&
4924 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4925 hci_link_tx_to(hdev, LE_LINK);
4928 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4930 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
4931 u32 priority = (skb_peek(&chan->data_q))->priority;
4932 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4933 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4934 skb->len, skb->priority);
4936 /* Stop if priority has changed */
4937 if (skb->priority < priority)
4940 skb = skb_dequeue(&chan->data_q);
4942 hci_send_frame(hdev, skb);
4943 hdev->le_last_tx = jiffies;
4954 hdev->acl_cnt = cnt;
4957 hci_prio_recalculate(hdev, LE_LINK);
4960 static void hci_tx_work(struct work_struct *work)
4962 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4963 struct sk_buff *skb;
4965 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4966 hdev->sco_cnt, hdev->le_cnt);
4968 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4969 /* Schedule queues and send stuff to HCI driver */
4970 hci_sched_acl(hdev);
4971 hci_sched_sco(hdev);
4972 hci_sched_esco(hdev);
4976 /* Send next queued raw (unknown type) packet */
4977 while ((skb = skb_dequeue(&hdev->raw_q)))
4978 hci_send_frame(hdev, skb);
4981 /* ----- HCI RX task (incoming data processing) ----- */
4983 /* ACL data packet */
4984 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4986 struct hci_acl_hdr *hdr = (void *) skb->data;
4987 struct hci_conn *conn;
4988 __u16 handle, flags;
4990 skb_pull(skb, HCI_ACL_HDR_SIZE);
4992 handle = __le16_to_cpu(hdr->handle);
4993 flags = hci_flags(handle);
4994 handle = hci_handle(handle);
4996 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4999 hdev->stat.acl_rx++;
5002 conn = hci_conn_hash_lookup_handle(hdev, handle);
5003 hci_dev_unlock(hdev);
5006 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5008 /* Send to upper protocol */
5009 l2cap_recv_acldata(conn, skb, flags);
5012 BT_ERR("%s ACL packet for unknown connection handle %d",
5013 hdev->name, handle);
5019 /* SCO data packet */
5020 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5022 struct hci_sco_hdr *hdr = (void *) skb->data;
5023 struct hci_conn *conn;
5026 skb_pull(skb, HCI_SCO_HDR_SIZE);
5028 handle = __le16_to_cpu(hdr->handle);
5030 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5032 hdev->stat.sco_rx++;
5035 conn = hci_conn_hash_lookup_handle(hdev, handle);
5036 hci_dev_unlock(hdev);
5039 /* Send to upper protocol */
5040 sco_recv_scodata(conn, skb);
5043 BT_ERR("%s SCO packet for unknown connection handle %d",
5044 hdev->name, handle);
5050 static bool hci_req_is_complete(struct hci_dev *hdev)
5052 struct sk_buff *skb;
5054 skb = skb_peek(&hdev->cmd_q);
5058 return bt_cb(skb)->req.start;
5061 static void hci_resend_last(struct hci_dev *hdev)
5063 struct hci_command_hdr *sent;
5064 struct sk_buff *skb;
5067 if (!hdev->sent_cmd)
5070 sent = (void *) hdev->sent_cmd->data;
5071 opcode = __le16_to_cpu(sent->opcode);
5072 if (opcode == HCI_OP_RESET)
5075 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5079 skb_queue_head(&hdev->cmd_q, skb);
5080 queue_work(hdev->workqueue, &hdev->cmd_work);
5083 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5085 hci_req_complete_t req_complete = NULL;
5086 struct sk_buff *skb;
5087 unsigned long flags;
5089 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5091 /* If the completed command doesn't match the last one that was
5092 * sent we need to do special handling of it.
5094 if (!hci_sent_cmd_data(hdev, opcode)) {
5095 /* Some CSR based controllers generate a spontaneous
5096 * reset complete event during init and any pending
5097 * command will never be completed. In such a case we
5098 * need to resend whatever was the last sent
5101 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5102 hci_resend_last(hdev);
5107 /* If the command succeeded and there's still more commands in
5108 * this request the request is not yet complete.
5110 if (!status && !hci_req_is_complete(hdev))
5113 /* If this was the last command in a request the complete
5114 * callback would be found in hdev->sent_cmd instead of the
5115 * command queue (hdev->cmd_q).
5117 if (hdev->sent_cmd) {
5118 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5121 /* We must set the complete callback to NULL to
5122 * avoid calling the callback more than once if
5123 * this function gets called again.
5125 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5131 /* Remove all pending commands belonging to this request */
5132 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5133 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5134 if (bt_cb(skb)->req.start) {
5135 __skb_queue_head(&hdev->cmd_q, skb);
5139 req_complete = bt_cb(skb)->req.complete;
5142 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5146 req_complete(hdev, status);
5149 static void hci_rx_work(struct work_struct *work)
5151 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5152 struct sk_buff *skb;
5154 BT_DBG("%s", hdev->name);
5156 while ((skb = skb_dequeue(&hdev->rx_q))) {
5157 /* Send copy to monitor */
5158 hci_send_to_monitor(hdev, skb);
5160 if (atomic_read(&hdev->promisc)) {
5161 /* Send copy to the sockets */
5162 hci_send_to_sock(hdev, skb);
5165 if (test_bit(HCI_RAW, &hdev->flags) ||
5166 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5171 if (test_bit(HCI_INIT, &hdev->flags)) {
5172 /* Don't process data packets in this states. */
5173 switch (bt_cb(skb)->pkt_type) {
5174 case HCI_ACLDATA_PKT:
5175 case HCI_SCODATA_PKT:
5182 switch (bt_cb(skb)->pkt_type) {
5184 BT_DBG("%s Event packet", hdev->name);
5185 hci_event_packet(hdev, skb);
5188 case HCI_ACLDATA_PKT:
5189 BT_DBG("%s ACL data packet", hdev->name);
5190 hci_acldata_packet(hdev, skb);
5193 case HCI_SCODATA_PKT:
5194 BT_DBG("%s SCO data packet", hdev->name);
5195 hci_scodata_packet(hdev, skb);
5205 static void hci_cmd_work(struct work_struct *work)
5207 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5208 struct sk_buff *skb;
5210 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5211 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5213 /* Send queued commands */
5214 if (atomic_read(&hdev->cmd_cnt)) {
5215 skb = skb_dequeue(&hdev->cmd_q);
5219 kfree_skb(hdev->sent_cmd);
5221 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5222 if (hdev->sent_cmd) {
5223 atomic_dec(&hdev->cmd_cnt);
5224 hci_send_frame(hdev, skb);
5225 if (test_bit(HCI_RESET, &hdev->flags))
5226 del_timer(&hdev->cmd_timer);
5228 mod_timer(&hdev->cmd_timer,
5229 jiffies + HCI_CMD_TIMEOUT);
5231 skb_queue_head(&hdev->cmd_q, skb);
5232 queue_work(hdev->workqueue, &hdev->cmd_work);
5237 void hci_req_add_le_scan_disable(struct hci_request *req)
5239 struct hci_cp_le_set_scan_enable cp;
5241 memset(&cp, 0, sizeof(cp));
5242 cp.enable = LE_SCAN_DISABLE;
5243 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5246 void hci_req_add_le_passive_scan(struct hci_request *req)
5248 struct hci_cp_le_set_scan_param param_cp;
5249 struct hci_cp_le_set_scan_enable enable_cp;
5250 struct hci_dev *hdev = req->hdev;
5253 /* Set require_privacy to true to avoid identification from
5254 * unknown peer devices. Since this is passive scanning, no
5255 * SCAN_REQ using the local identity should be sent. Mandating
5256 * privacy is just an extra precaution.
5258 if (hci_update_random_address(req, true, &own_addr_type))
5261 memset(¶m_cp, 0, sizeof(param_cp));
5262 param_cp.type = LE_SCAN_PASSIVE;
5263 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5264 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5265 param_cp.own_address_type = own_addr_type;
5266 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5269 memset(&enable_cp, 0, sizeof(enable_cp));
5270 enable_cp.enable = LE_SCAN_ENABLE;
5271 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5272 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5276 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5279 BT_DBG("HCI request failed to update background scanning: "
5280 "status 0x%2.2x", status);
5283 /* This function controls the background scanning based on hdev->pend_le_conns
5284 * list. If there are pending LE connection we start the background scanning,
5285 * otherwise we stop it.
5287 * This function requires the caller holds hdev->lock.
5289 void hci_update_background_scan(struct hci_dev *hdev)
5291 struct hci_request req;
5292 struct hci_conn *conn;
5295 hci_req_init(&req, hdev);
5297 if (list_empty(&hdev->pend_le_conns)) {
5298 /* If there is no pending LE connections, we should stop
5299 * the background scanning.
5302 /* If controller is not scanning we are done. */
5303 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5306 hci_req_add_le_scan_disable(&req);
5308 BT_DBG("%s stopping background scanning", hdev->name);
5310 /* If there is at least one pending LE connection, we should
5311 * keep the background scan running.
5314 /* If controller is connecting, we should not start scanning
5315 * since some controllers are not able to scan and connect at
5318 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5322 /* If controller is currently scanning, we stop it to ensure we
5323 * don't miss any advertising (due to duplicates filter).
5325 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5326 hci_req_add_le_scan_disable(&req);
5328 hci_req_add_le_passive_scan(&req);
5330 BT_DBG("%s starting background scanning", hdev->name);
5333 err = hci_req_run(&req, update_background_scan_complete);
5335 BT_ERR("Failed to run HCI request: err %d", err);