Bluetooth: Remove unused hci_find_ltk function
[linux-block.git] / net / bluetooth / hci_core.c
CommitLineData
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
99780a7b 32#include <linux/crypto.h>
47219839 33#include <asm/unaligned.h>
1da177e4
LT
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
4bc58f51 37#include <net/bluetooth/l2cap.h>
af58925c 38#include <net/bluetooth/mgmt.h>
1da177e4 39
970c4e46
JH
40#include "smp.h"
41
b78752cc 42static void hci_rx_work(struct work_struct *work);
c347b765 43static void hci_cmd_work(struct work_struct *work);
3eff45ea 44static void hci_tx_work(struct work_struct *work);
1da177e4 45
1da177e4
LT
46/* HCI device list */
47LIST_HEAD(hci_dev_list);
48DEFINE_RWLOCK(hci_dev_list_lock);
49
50/* HCI callback list */
51LIST_HEAD(hci_cb_list);
52DEFINE_RWLOCK(hci_cb_list_lock);
53
3df92b31
SL
54/* HCI ID Numbering */
55static DEFINE_IDA(hci_index_ida);
56
899de765
MH
57/* ----- HCI requests ----- */
58
59#define HCI_REQ_DONE 0
60#define HCI_REQ_PEND 1
61#define HCI_REQ_CANCELED 2
62
63#define hci_req_lock(d) mutex_lock(&d->req_lock)
64#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
65
1da177e4
LT
66/* ---- HCI notifications ---- */
67
/* Forward a device state event (register/unregister/up/down) to the
 * HCI socket layer so monitoring sockets can see it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
72
baf27f6e
MH
73/* ---- HCI debugfs entries ---- */
74
4b4148e9
MH
75static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
77{
78 struct hci_dev *hdev = file->private_data;
79 char buf[3];
80
111902f7 81 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
4b4148e9
MH
82 buf[1] = '\n';
83 buf[2] = '\0';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
85}
86
87static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 size_t count, loff_t *ppos)
89{
90 struct hci_dev *hdev = file->private_data;
91 struct sk_buff *skb;
92 char buf[32];
93 size_t buf_size = min(count, (sizeof(buf)-1));
94 bool enable;
95 int err;
96
97 if (!test_bit(HCI_UP, &hdev->flags))
98 return -ENETDOWN;
99
100 if (copy_from_user(buf, user_buf, buf_size))
101 return -EFAULT;
102
103 buf[buf_size] = '\0';
104 if (strtobool(buf, &enable))
105 return -EINVAL;
106
111902f7 107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
4b4148e9
MH
108 return -EALREADY;
109
110 hci_req_lock(hdev);
111 if (enable)
112 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
113 HCI_CMD_TIMEOUT);
114 else
115 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
116 HCI_CMD_TIMEOUT);
117 hci_req_unlock(hdev);
118
119 if (IS_ERR(skb))
120 return PTR_ERR(skb);
121
122 err = -bt_to_errno(skb->data[0]);
123 kfree_skb(skb);
124
125 if (err < 0)
126 return err;
127
111902f7 128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
4b4148e9
MH
129
130 return count;
131}
132
133static const struct file_operations dut_mode_fops = {
134 .open = simple_open,
135 .read = dut_mode_read,
136 .write = dut_mode_write,
137 .llseek = default_llseek,
138};
139
dfb826a8
MH
140static int features_show(struct seq_file *f, void *ptr)
141{
142 struct hci_dev *hdev = f->private;
143 u8 p;
144
145 hci_dev_lock(hdev);
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
cfbb2b5b 147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
dfb826a8
MH
148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
153 }
cfbb2b5b
MH
154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
dfb826a8
MH
161 hci_dev_unlock(hdev);
162
163 return 0;
164}
165
166static int features_open(struct inode *inode, struct file *file)
167{
168 return single_open(file, features_show, inode->i_private);
169}
170
171static const struct file_operations features_fops = {
172 .open = features_open,
173 .read = seq_read,
174 .llseek = seq_lseek,
175 .release = single_release,
176};
177
70afe0b8
MH
178static int blacklist_show(struct seq_file *f, void *p)
179{
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
182
183 hci_dev_lock(hdev);
184 list_for_each_entry(b, &hdev->blacklist, list)
b25f0785 185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
70afe0b8
MH
186 hci_dev_unlock(hdev);
187
188 return 0;
189}
190
191static int blacklist_open(struct inode *inode, struct file *file)
192{
193 return single_open(file, blacklist_show, inode->i_private);
194}
195
196static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
198 .read = seq_read,
199 .llseek = seq_lseek,
200 .release = single_release,
201};
202
47219839
MH
203static int uuids_show(struct seq_file *f, void *p)
204{
205 struct hci_dev *hdev = f->private;
206 struct bt_uuid *uuid;
207
208 hci_dev_lock(hdev);
209 list_for_each_entry(uuid, &hdev->uuids, list) {
58f01aa9
MH
210 u8 i, val[16];
211
212 /* The Bluetooth UUID values are stored in big endian,
213 * but with reversed byte order. So convert them into
214 * the right order for the %pUb modifier.
215 */
216 for (i = 0; i < 16; i++)
217 val[i] = uuid->uuid[15 - i];
218
219 seq_printf(f, "%pUb\n", val);
47219839
MH
220 }
221 hci_dev_unlock(hdev);
222
223 return 0;
224}
225
226static int uuids_open(struct inode *inode, struct file *file)
227{
228 return single_open(file, uuids_show, inode->i_private);
229}
230
231static const struct file_operations uuids_fops = {
232 .open = uuids_open,
233 .read = seq_read,
234 .llseek = seq_lseek,
235 .release = single_release,
236};
237
baf27f6e
MH
238static int inquiry_cache_show(struct seq_file *f, void *p)
239{
240 struct hci_dev *hdev = f->private;
241 struct discovery_state *cache = &hdev->discovery;
242 struct inquiry_entry *e;
243
244 hci_dev_lock(hdev);
245
246 list_for_each_entry(e, &cache->all, all) {
247 struct inquiry_data *data = &e->data;
248 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
249 &data->bdaddr,
250 data->pscan_rep_mode, data->pscan_period_mode,
251 data->pscan_mode, data->dev_class[2],
252 data->dev_class[1], data->dev_class[0],
253 __le16_to_cpu(data->clock_offset),
254 data->rssi, data->ssp_mode, e->timestamp);
255 }
256
257 hci_dev_unlock(hdev);
258
259 return 0;
260}
261
262static int inquiry_cache_open(struct inode *inode, struct file *file)
263{
264 return single_open(file, inquiry_cache_show, inode->i_private);
265}
266
267static const struct file_operations inquiry_cache_fops = {
268 .open = inquiry_cache_open,
269 .read = seq_read,
270 .llseek = seq_lseek,
271 .release = single_release,
272};
273
02d08d15
MH
274static int link_keys_show(struct seq_file *f, void *ptr)
275{
276 struct hci_dev *hdev = f->private;
0378b597 277 struct link_key *key;
02d08d15 278
0378b597
JH
279 rcu_read_lock();
280 list_for_each_entry_rcu(key, &hdev->link_keys, list)
02d08d15
MH
281 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
282 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
0378b597 283 rcu_read_unlock();
02d08d15
MH
284
285 return 0;
286}
287
288static int link_keys_open(struct inode *inode, struct file *file)
289{
290 return single_open(file, link_keys_show, inode->i_private);
291}
292
293static const struct file_operations link_keys_fops = {
294 .open = link_keys_open,
295 .read = seq_read,
296 .llseek = seq_lseek,
297 .release = single_release,
298};
299
babdbb3c
MH
300static int dev_class_show(struct seq_file *f, void *ptr)
301{
302 struct hci_dev *hdev = f->private;
303
304 hci_dev_lock(hdev);
305 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
306 hdev->dev_class[1], hdev->dev_class[0]);
307 hci_dev_unlock(hdev);
308
309 return 0;
310}
311
312static int dev_class_open(struct inode *inode, struct file *file)
313{
314 return single_open(file, dev_class_show, inode->i_private);
315}
316
317static const struct file_operations dev_class_fops = {
318 .open = dev_class_open,
319 .read = seq_read,
320 .llseek = seq_lseek,
321 .release = single_release,
322};
323
041000b9
MH
324static int voice_setting_get(void *data, u64 *val)
325{
326 struct hci_dev *hdev = data;
327
328 hci_dev_lock(hdev);
329 *val = hdev->voice_setting;
330 hci_dev_unlock(hdev);
331
332 return 0;
333}
334
335DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
336 NULL, "0x%4.4llx\n");
337
ebd1e33b
MH
338static int auto_accept_delay_set(void *data, u64 val)
339{
340 struct hci_dev *hdev = data;
341
342 hci_dev_lock(hdev);
343 hdev->auto_accept_delay = val;
344 hci_dev_unlock(hdev);
345
346 return 0;
347}
348
349static int auto_accept_delay_get(void *data, u64 *val)
350{
351 struct hci_dev *hdev = data;
352
353 hci_dev_lock(hdev);
354 *val = hdev->auto_accept_delay;
355 hci_dev_unlock(hdev);
356
357 return 0;
358}
359
360DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
361 auto_accept_delay_set, "%llu\n");
362
5afeac14
MH
363static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
364 size_t count, loff_t *ppos)
365{
366 struct hci_dev *hdev = file->private_data;
367 char buf[3];
368
111902f7 369 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
5afeac14
MH
370 buf[1] = '\n';
371 buf[2] = '\0';
372 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
373}
374
375static ssize_t force_sc_support_write(struct file *file,
376 const char __user *user_buf,
377 size_t count, loff_t *ppos)
378{
379 struct hci_dev *hdev = file->private_data;
380 char buf[32];
381 size_t buf_size = min(count, (sizeof(buf)-1));
382 bool enable;
383
384 if (test_bit(HCI_UP, &hdev->flags))
385 return -EBUSY;
386
387 if (copy_from_user(buf, user_buf, buf_size))
388 return -EFAULT;
389
390 buf[buf_size] = '\0';
391 if (strtobool(buf, &enable))
392 return -EINVAL;
393
111902f7 394 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
5afeac14
MH
395 return -EALREADY;
396
111902f7 397 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
5afeac14
MH
398
399 return count;
400}
401
402static const struct file_operations force_sc_support_fops = {
403 .open = simple_open,
404 .read = force_sc_support_read,
405 .write = force_sc_support_write,
406 .llseek = default_llseek,
407};
408
134c2a89
MH
409static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
410 size_t count, loff_t *ppos)
411{
412 struct hci_dev *hdev = file->private_data;
413 char buf[3];
414
415 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
416 buf[1] = '\n';
417 buf[2] = '\0';
418 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
419}
420
421static const struct file_operations sc_only_mode_fops = {
422 .open = simple_open,
423 .read = sc_only_mode_read,
424 .llseek = default_llseek,
425};
426
2bfa3531
MH
427static int idle_timeout_set(void *data, u64 val)
428{
429 struct hci_dev *hdev = data;
430
431 if (val != 0 && (val < 500 || val > 3600000))
432 return -EINVAL;
433
434 hci_dev_lock(hdev);
2be48b65 435 hdev->idle_timeout = val;
2bfa3531
MH
436 hci_dev_unlock(hdev);
437
438 return 0;
439}
440
441static int idle_timeout_get(void *data, u64 *val)
442{
443 struct hci_dev *hdev = data;
444
445 hci_dev_lock(hdev);
446 *val = hdev->idle_timeout;
447 hci_dev_unlock(hdev);
448
449 return 0;
450}
451
452DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
453 idle_timeout_set, "%llu\n");
454
c982b2ea
JH
455static int rpa_timeout_set(void *data, u64 val)
456{
457 struct hci_dev *hdev = data;
458
459 /* Require the RPA timeout to be at least 30 seconds and at most
460 * 24 hours.
461 */
462 if (val < 30 || val > (60 * 60 * 24))
463 return -EINVAL;
464
465 hci_dev_lock(hdev);
466 hdev->rpa_timeout = val;
467 hci_dev_unlock(hdev);
468
469 return 0;
470}
471
472static int rpa_timeout_get(void *data, u64 *val)
473{
474 struct hci_dev *hdev = data;
475
476 hci_dev_lock(hdev);
477 *val = hdev->rpa_timeout;
478 hci_dev_unlock(hdev);
479
480 return 0;
481}
482
483DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
484 rpa_timeout_set, "%llu\n");
485
2bfa3531
MH
486static int sniff_min_interval_set(void *data, u64 val)
487{
488 struct hci_dev *hdev = data;
489
490 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
491 return -EINVAL;
492
493 hci_dev_lock(hdev);
2be48b65 494 hdev->sniff_min_interval = val;
2bfa3531
MH
495 hci_dev_unlock(hdev);
496
497 return 0;
498}
499
500static int sniff_min_interval_get(void *data, u64 *val)
501{
502 struct hci_dev *hdev = data;
503
504 hci_dev_lock(hdev);
505 *val = hdev->sniff_min_interval;
506 hci_dev_unlock(hdev);
507
508 return 0;
509}
510
511DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
512 sniff_min_interval_set, "%llu\n");
513
514static int sniff_max_interval_set(void *data, u64 val)
515{
516 struct hci_dev *hdev = data;
517
518 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
519 return -EINVAL;
520
521 hci_dev_lock(hdev);
2be48b65 522 hdev->sniff_max_interval = val;
2bfa3531
MH
523 hci_dev_unlock(hdev);
524
525 return 0;
526}
527
528static int sniff_max_interval_get(void *data, u64 *val)
529{
530 struct hci_dev *hdev = data;
531
532 hci_dev_lock(hdev);
533 *val = hdev->sniff_max_interval;
534 hci_dev_unlock(hdev);
535
536 return 0;
537}
538
539DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
540 sniff_max_interval_set, "%llu\n");
541
31ad1691
AK
542static int conn_info_min_age_set(void *data, u64 val)
543{
544 struct hci_dev *hdev = data;
545
546 if (val == 0 || val > hdev->conn_info_max_age)
547 return -EINVAL;
548
549 hci_dev_lock(hdev);
550 hdev->conn_info_min_age = val;
551 hci_dev_unlock(hdev);
552
553 return 0;
554}
555
556static int conn_info_min_age_get(void *data, u64 *val)
557{
558 struct hci_dev *hdev = data;
559
560 hci_dev_lock(hdev);
561 *val = hdev->conn_info_min_age;
562 hci_dev_unlock(hdev);
563
564 return 0;
565}
566
567DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
568 conn_info_min_age_set, "%llu\n");
569
570static int conn_info_max_age_set(void *data, u64 val)
571{
572 struct hci_dev *hdev = data;
573
574 if (val == 0 || val < hdev->conn_info_min_age)
575 return -EINVAL;
576
577 hci_dev_lock(hdev);
578 hdev->conn_info_max_age = val;
579 hci_dev_unlock(hdev);
580
581 return 0;
582}
583
584static int conn_info_max_age_get(void *data, u64 *val)
585{
586 struct hci_dev *hdev = data;
587
588 hci_dev_lock(hdev);
589 *val = hdev->conn_info_max_age;
590 hci_dev_unlock(hdev);
591
592 return 0;
593}
594
595DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
596 conn_info_max_age_set, "%llu\n");
597
ac345813
MH
598static int identity_show(struct seq_file *f, void *p)
599{
600 struct hci_dev *hdev = f->private;
a1f4c318 601 bdaddr_t addr;
ac345813
MH
602 u8 addr_type;
603
604 hci_dev_lock(hdev);
605
a1f4c318 606 hci_copy_identity_address(hdev, &addr, &addr_type);
ac345813 607
a1f4c318 608 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
473deef2 609 16, hdev->irk, &hdev->rpa);
ac345813
MH
610
611 hci_dev_unlock(hdev);
612
613 return 0;
614}
615
616static int identity_open(struct inode *inode, struct file *file)
617{
618 return single_open(file, identity_show, inode->i_private);
619}
620
621static const struct file_operations identity_fops = {
622 .open = identity_open,
623 .read = seq_read,
624 .llseek = seq_lseek,
625 .release = single_release,
626};
627
7a4cd51d
MH
628static int random_address_show(struct seq_file *f, void *p)
629{
630 struct hci_dev *hdev = f->private;
631
632 hci_dev_lock(hdev);
633 seq_printf(f, "%pMR\n", &hdev->random_addr);
634 hci_dev_unlock(hdev);
635
636 return 0;
637}
638
639static int random_address_open(struct inode *inode, struct file *file)
640{
641 return single_open(file, random_address_show, inode->i_private);
642}
643
644static const struct file_operations random_address_fops = {
645 .open = random_address_open,
646 .read = seq_read,
647 .llseek = seq_lseek,
648 .release = single_release,
649};
650
e7b8fc92
MH
651static int static_address_show(struct seq_file *f, void *p)
652{
653 struct hci_dev *hdev = f->private;
654
655 hci_dev_lock(hdev);
656 seq_printf(f, "%pMR\n", &hdev->static_addr);
657 hci_dev_unlock(hdev);
658
659 return 0;
660}
661
662static int static_address_open(struct inode *inode, struct file *file)
663{
664 return single_open(file, static_address_show, inode->i_private);
665}
666
667static const struct file_operations static_address_fops = {
668 .open = static_address_open,
669 .read = seq_read,
670 .llseek = seq_lseek,
671 .release = single_release,
672};
673
b32bba6c
MH
674static ssize_t force_static_address_read(struct file *file,
675 char __user *user_buf,
676 size_t count, loff_t *ppos)
92202185 677{
b32bba6c
MH
678 struct hci_dev *hdev = file->private_data;
679 char buf[3];
92202185 680
111902f7 681 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
b32bba6c
MH
682 buf[1] = '\n';
683 buf[2] = '\0';
684 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
92202185
MH
685}
686
b32bba6c
MH
687static ssize_t force_static_address_write(struct file *file,
688 const char __user *user_buf,
689 size_t count, loff_t *ppos)
92202185 690{
b32bba6c
MH
691 struct hci_dev *hdev = file->private_data;
692 char buf[32];
693 size_t buf_size = min(count, (sizeof(buf)-1));
694 bool enable;
92202185 695
b32bba6c
MH
696 if (test_bit(HCI_UP, &hdev->flags))
697 return -EBUSY;
92202185 698
b32bba6c
MH
699 if (copy_from_user(buf, user_buf, buf_size))
700 return -EFAULT;
701
702 buf[buf_size] = '\0';
703 if (strtobool(buf, &enable))
704 return -EINVAL;
705
111902f7 706 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
b32bba6c
MH
707 return -EALREADY;
708
111902f7 709 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
b32bba6c
MH
710
711 return count;
92202185
MH
712}
713
b32bba6c
MH
714static const struct file_operations force_static_address_fops = {
715 .open = simple_open,
716 .read = force_static_address_read,
717 .write = force_static_address_write,
718 .llseek = default_llseek,
719};
92202185 720
d2ab0ac1
MH
721static int white_list_show(struct seq_file *f, void *ptr)
722{
723 struct hci_dev *hdev = f->private;
724 struct bdaddr_list *b;
725
726 hci_dev_lock(hdev);
727 list_for_each_entry(b, &hdev->le_white_list, list)
728 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
729 hci_dev_unlock(hdev);
730
731 return 0;
732}
733
734static int white_list_open(struct inode *inode, struct file *file)
735{
736 return single_open(file, white_list_show, inode->i_private);
737}
738
739static const struct file_operations white_list_fops = {
740 .open = white_list_open,
741 .read = seq_read,
742 .llseek = seq_lseek,
743 .release = single_release,
744};
745
3698d704
MH
746static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
747{
748 struct hci_dev *hdev = f->private;
adae20cb 749 struct smp_irk *irk;
3698d704 750
adae20cb
JH
751 rcu_read_lock();
752 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3698d704
MH
753 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
754 &irk->bdaddr, irk->addr_type,
755 16, irk->val, &irk->rpa);
756 }
adae20cb 757 rcu_read_unlock();
3698d704
MH
758
759 return 0;
760}
761
762static int identity_resolving_keys_open(struct inode *inode, struct file *file)
763{
764 return single_open(file, identity_resolving_keys_show,
765 inode->i_private);
766}
767
768static const struct file_operations identity_resolving_keys_fops = {
769 .open = identity_resolving_keys_open,
770 .read = seq_read,
771 .llseek = seq_lseek,
772 .release = single_release,
773};
774
8f8625cd
MH
775static int long_term_keys_show(struct seq_file *f, void *ptr)
776{
777 struct hci_dev *hdev = f->private;
970d0f1b 778 struct smp_ltk *ltk;
8f8625cd 779
970d0f1b
JH
780 rcu_read_lock();
781 list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
fe39c7b2 782 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
8f8625cd
MH
783 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
784 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
fe39c7b2 785 __le64_to_cpu(ltk->rand), 16, ltk->val);
970d0f1b 786 rcu_read_unlock();
8f8625cd
MH
787
788 return 0;
789}
790
791static int long_term_keys_open(struct inode *inode, struct file *file)
792{
793 return single_open(file, long_term_keys_show, inode->i_private);
794}
795
796static const struct file_operations long_term_keys_fops = {
797 .open = long_term_keys_open,
798 .read = seq_read,
799 .llseek = seq_lseek,
800 .release = single_release,
801};
802
4e70c7e7
MH
803static int conn_min_interval_set(void *data, u64 val)
804{
805 struct hci_dev *hdev = data;
806
807 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
808 return -EINVAL;
809
810 hci_dev_lock(hdev);
2be48b65 811 hdev->le_conn_min_interval = val;
4e70c7e7
MH
812 hci_dev_unlock(hdev);
813
814 return 0;
815}
816
817static int conn_min_interval_get(void *data, u64 *val)
818{
819 struct hci_dev *hdev = data;
820
821 hci_dev_lock(hdev);
822 *val = hdev->le_conn_min_interval;
823 hci_dev_unlock(hdev);
824
825 return 0;
826}
827
828DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
829 conn_min_interval_set, "%llu\n");
830
831static int conn_max_interval_set(void *data, u64 val)
832{
833 struct hci_dev *hdev = data;
834
835 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
836 return -EINVAL;
837
838 hci_dev_lock(hdev);
2be48b65 839 hdev->le_conn_max_interval = val;
4e70c7e7
MH
840 hci_dev_unlock(hdev);
841
842 return 0;
843}
844
845static int conn_max_interval_get(void *data, u64 *val)
846{
847 struct hci_dev *hdev = data;
848
849 hci_dev_lock(hdev);
850 *val = hdev->le_conn_max_interval;
851 hci_dev_unlock(hdev);
852
853 return 0;
854}
855
856DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
857 conn_max_interval_set, "%llu\n");
858
816a93d1 859static int conn_latency_set(void *data, u64 val)
3f959d46
MH
860{
861 struct hci_dev *hdev = data;
862
816a93d1 863 if (val > 0x01f3)
3f959d46
MH
864 return -EINVAL;
865
866 hci_dev_lock(hdev);
816a93d1 867 hdev->le_conn_latency = val;
3f959d46
MH
868 hci_dev_unlock(hdev);
869
870 return 0;
871}
872
816a93d1 873static int conn_latency_get(void *data, u64 *val)
3f959d46
MH
874{
875 struct hci_dev *hdev = data;
876
877 hci_dev_lock(hdev);
816a93d1 878 *val = hdev->le_conn_latency;
3f959d46
MH
879 hci_dev_unlock(hdev);
880
881 return 0;
882}
883
816a93d1
MH
884DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
885 conn_latency_set, "%llu\n");
3f959d46 886
f1649577 887static int supervision_timeout_set(void *data, u64 val)
89863109 888{
f1649577 889 struct hci_dev *hdev = data;
89863109 890
f1649577
MH
891 if (val < 0x000a || val > 0x0c80)
892 return -EINVAL;
893
894 hci_dev_lock(hdev);
895 hdev->le_supv_timeout = val;
896 hci_dev_unlock(hdev);
897
898 return 0;
89863109
JR
899}
900
f1649577 901static int supervision_timeout_get(void *data, u64 *val)
89863109 902{
f1649577 903 struct hci_dev *hdev = data;
89863109 904
f1649577
MH
905 hci_dev_lock(hdev);
906 *val = hdev->le_supv_timeout;
907 hci_dev_unlock(hdev);
89863109 908
f1649577
MH
909 return 0;
910}
89863109 911
f1649577
MH
912DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
913 supervision_timeout_set, "%llu\n");
89863109 914
3f959d46
MH
915static int adv_channel_map_set(void *data, u64 val)
916{
917 struct hci_dev *hdev = data;
89863109 918
3f959d46
MH
919 if (val < 0x01 || val > 0x07)
920 return -EINVAL;
89863109 921
3f959d46
MH
922 hci_dev_lock(hdev);
923 hdev->le_adv_channel_map = val;
924 hci_dev_unlock(hdev);
89863109 925
3f959d46
MH
926 return 0;
927}
89863109 928
3f959d46 929static int adv_channel_map_get(void *data, u64 *val)
7d474e06 930{
3f959d46 931 struct hci_dev *hdev = data;
7d474e06
AG
932
933 hci_dev_lock(hdev);
3f959d46
MH
934 *val = hdev->le_adv_channel_map;
935 hci_dev_unlock(hdev);
7d474e06 936
3f959d46
MH
937 return 0;
938}
939
940DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
941 adv_channel_map_set, "%llu\n");
7d474e06 942
729a1051
GL
943static int adv_min_interval_set(void *data, u64 val)
944{
945 struct hci_dev *hdev = data;
946
947 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
948 return -EINVAL;
949
950 hci_dev_lock(hdev);
951 hdev->le_adv_min_interval = val;
7d474e06
AG
952 hci_dev_unlock(hdev);
953
954 return 0;
955}
956
729a1051 957static int adv_min_interval_get(void *data, u64 *val)
7d474e06 958{
729a1051
GL
959 struct hci_dev *hdev = data;
960
961 hci_dev_lock(hdev);
962 *val = hdev->le_adv_min_interval;
963 hci_dev_unlock(hdev);
964
965 return 0;
7d474e06
AG
966}
967
729a1051
GL
968DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
969 adv_min_interval_set, "%llu\n");
970
971static int adv_max_interval_set(void *data, u64 val)
7d474e06 972{
729a1051 973 struct hci_dev *hdev = data;
7d474e06 974
729a1051 975 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
7d474e06
AG
976 return -EINVAL;
977
729a1051
GL
978 hci_dev_lock(hdev);
979 hdev->le_adv_max_interval = val;
980 hci_dev_unlock(hdev);
7d474e06 981
729a1051
GL
982 return 0;
983}
7d474e06 984
729a1051
GL
985static int adv_max_interval_get(void *data, u64 *val)
986{
987 struct hci_dev *hdev = data;
7d474e06 988
729a1051
GL
989 hci_dev_lock(hdev);
990 *val = hdev->le_adv_max_interval;
991 hci_dev_unlock(hdev);
7d474e06 992
729a1051
GL
993 return 0;
994}
7d474e06 995
729a1051
GL
996DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
997 adv_max_interval_set, "%llu\n");
7d474e06 998
0b3c7d37 999static int device_list_show(struct seq_file *f, void *ptr)
7d474e06 1000{
0b3c7d37 1001 struct hci_dev *hdev = f->private;
7d474e06 1002 struct hci_conn_params *p;
40f4938a 1003 struct bdaddr_list *b;
7d474e06 1004
7d474e06 1005 hci_dev_lock(hdev);
40f4938a
MH
1006 list_for_each_entry(b, &hdev->whitelist, list)
1007 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
7d474e06 1008 list_for_each_entry(p, &hdev->le_conn_params, list) {
40f4938a 1009 seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
7d474e06 1010 p->auto_connect);
7d474e06 1011 }
7d474e06 1012 hci_dev_unlock(hdev);
7d474e06 1013
7d474e06
AG
1014 return 0;
1015}
7d474e06 1016
0b3c7d37 1017static int device_list_open(struct inode *inode, struct file *file)
7d474e06 1018{
0b3c7d37 1019 return single_open(file, device_list_show, inode->i_private);
7d474e06
AG
1020}
1021
0b3c7d37
MH
1022static const struct file_operations device_list_fops = {
1023 .open = device_list_open,
7d474e06 1024 .read = seq_read,
7d474e06
AG
1025 .llseek = seq_lseek,
1026 .release = single_release,
1027};
1028
1da177e4
LT
1029/* ---- HCI requests ---- */
1030
42c6b129 1031static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 1032{
42c6b129 1033 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
1034
1035 if (hdev->req_status == HCI_REQ_PEND) {
1036 hdev->req_result = result;
1037 hdev->req_status = HCI_REQ_DONE;
1038 wake_up_interruptible(&hdev->req_wait_q);
1039 }
1040}
1041
1042static void hci_req_cancel(struct hci_dev *hdev, int err)
1043{
1044 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1045
1046 if (hdev->req_status == HCI_REQ_PEND) {
1047 hdev->req_result = err;
1048 hdev->req_status = HCI_REQ_CANCELED;
1049 wake_up_interruptible(&hdev->req_wait_q);
1050 }
1051}
1052
77a63e0a
FW
1053static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1054 u8 event)
75e84b7c
JH
1055{
1056 struct hci_ev_cmd_complete *ev;
1057 struct hci_event_hdr *hdr;
1058 struct sk_buff *skb;
1059
1060 hci_dev_lock(hdev);
1061
1062 skb = hdev->recv_evt;
1063 hdev->recv_evt = NULL;
1064
1065 hci_dev_unlock(hdev);
1066
1067 if (!skb)
1068 return ERR_PTR(-ENODATA);
1069
1070 if (skb->len < sizeof(*hdr)) {
1071 BT_ERR("Too short HCI event");
1072 goto failed;
1073 }
1074
1075 hdr = (void *) skb->data;
1076 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1077
7b1abbbe
JH
1078 if (event) {
1079 if (hdr->evt != event)
1080 goto failed;
1081 return skb;
1082 }
1083
75e84b7c
JH
1084 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1085 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1086 goto failed;
1087 }
1088
1089 if (skb->len < sizeof(*ev)) {
1090 BT_ERR("Too short cmd_complete event");
1091 goto failed;
1092 }
1093
1094 ev = (void *) skb->data;
1095 skb_pull(skb, sizeof(*ev));
1096
1097 if (opcode == __le16_to_cpu(ev->opcode))
1098 return skb;
1099
1100 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1101 __le16_to_cpu(ev->opcode));
1102
1103failed:
1104 kfree_skb(skb);
1105 return ERR_PTR(-ENODATA);
1106}
1107
7b1abbbe 1108struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 1109 const void *param, u8 event, u32 timeout)
75e84b7c
JH
1110{
1111 DECLARE_WAITQUEUE(wait, current);
1112 struct hci_request req;
1113 int err = 0;
1114
1115 BT_DBG("%s", hdev->name);
1116
1117 hci_req_init(&req, hdev);
1118
7b1abbbe 1119 hci_req_add_ev(&req, opcode, plen, param, event);
75e84b7c
JH
1120
1121 hdev->req_status = HCI_REQ_PEND;
1122
75e84b7c
JH
1123 add_wait_queue(&hdev->req_wait_q, &wait);
1124 set_current_state(TASK_INTERRUPTIBLE);
1125
039fada5
CP
1126 err = hci_req_run(&req, hci_req_sync_complete);
1127 if (err < 0) {
1128 remove_wait_queue(&hdev->req_wait_q, &wait);
22a3ceab 1129 set_current_state(TASK_RUNNING);
039fada5
CP
1130 return ERR_PTR(err);
1131 }
1132
75e84b7c
JH
1133 schedule_timeout(timeout);
1134
1135 remove_wait_queue(&hdev->req_wait_q, &wait);
1136
1137 if (signal_pending(current))
1138 return ERR_PTR(-EINTR);
1139
1140 switch (hdev->req_status) {
1141 case HCI_REQ_DONE:
1142 err = -bt_to_errno(hdev->req_result);
1143 break;
1144
1145 case HCI_REQ_CANCELED:
1146 err = -hdev->req_result;
1147 break;
1148
1149 default:
1150 err = -ETIMEDOUT;
1151 break;
1152 }
1153
1154 hdev->req_status = hdev->req_result = 0;
1155
1156 BT_DBG("%s end: err %d", hdev->name, err);
1157
1158 if (err < 0)
1159 return ERR_PTR(err);
1160
7b1abbbe
JH
1161 return hci_get_cmd_complete(hdev, opcode, event);
1162}
1163EXPORT_SYMBOL(__hci_cmd_sync_ev);
1164
1165struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 1166 const void *param, u32 timeout)
7b1abbbe
JH
1167{
1168 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
75e84b7c
JH
1169}
1170EXPORT_SYMBOL(__hci_cmd_sync);
1171
/* Execute request and wait for completion.
 *
 * @func builds the request (queues zero or more HCI commands) and is
 * invoked with @opt as its opaque argument. Returns 0 on success,
 * -EINTR on signal, -ETIMEDOUT on expiry, or a negative errno derived
 * from the controller result. A request that queues no commands at all
 * (-ENODATA from hci_req_run) is treated as success.
 *
 * Callers serialize via hci_req_lock(); see hci_req_sync().
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller-supplied builder queue its commands */
	func(&req, opt);

	/* Register on the wait queue before running the request so the
	 * completion wake-up cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Translate the HCI status byte into a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno set by the canceller */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1238
01178cd4 1239static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
1240 void (*req)(struct hci_request *req,
1241 unsigned long opt),
01178cd4 1242 unsigned long opt, __u32 timeout)
1da177e4
LT
1243{
1244 int ret;
1245
7c6a329e
MH
1246 if (!test_bit(HCI_UP, &hdev->flags))
1247 return -ENETDOWN;
1248
1da177e4
LT
1249 /* Serialize all requests */
1250 hci_req_lock(hdev);
01178cd4 1251 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
1252 hci_req_unlock(hdev);
1253
1254 return ret;
1255}
1256
42c6b129 1257static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 1258{
42c6b129 1259 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
1260
1261 /* Reset device */
42c6b129
JH
1262 set_bit(HCI_RESET, &req->hdev->flags);
1263 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
1264}
1265
/* Stage-1 init for BR/EDR (and dual-mode) controllers: switch to
 * packet-based flow control and read basic controller identity.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1279
/* Stage-1 init for AMP controllers: block-based flow control plus the
 * AMP-specific discovery commands (AMP info, data block size, flow
 * control mode, location data).
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1305
/* First-stage init request: optional reset, then the device-type
 * specific basic discovery sequence (bredr_init/amp_init).
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
1330
/* Stage-2 BR/EDR setup: read buffer/identity parameters, clear event
 * filters and set the connection accept timeout. Page scan parameter
 * reads are skipped for controllers known not to support them.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1372
/* Stage-2 LE setup: read LE buffer sizes, features, states and white
 * list, then clear the white list to start from a known state.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1396
1397static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1398{
1399 if (lmp_ext_inq_capable(hdev))
1400 return 0x02;
1401
1402 if (lmp_inq_rssi_capable(hdev))
1403 return 0x01;
1404
1405 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1406 hdev->lmp_subver == 0x0757)
1407 return 0x01;
1408
1409 if (hdev->manufacturer == 15) {
1410 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1411 return 0x01;
1412 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1413 return 0x01;
1414 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1415 return 0x01;
1416 }
1417
1418 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1419 hdev->lmp_subver == 0x1805)
1420 return 0x01;
1421
1422 return 0x00;
1423}
1424
42c6b129 1425static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
1426{
1427 u8 mode;
1428
42c6b129 1429 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 1430
42c6b129 1431 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
1432}
1433
/* Build and queue the Set Event Mask command, enabling only the events
 * the controller's feature set can actually generate. LE-only devices
 * get a minimal mask built from scratch.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
1510
/* Second-stage init request: transport-specific setup (BR/EDR and/or
 * LE), then feature-conditional configuration such as SSP mode, EIR,
 * inquiry mode and extended features.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stale EIR data both
			 * locally and on the controller.
			 */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1572
42c6b129 1573static void hci_setup_link_policy(struct hci_request *req)
2177bab5 1574{
42c6b129 1575 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1576 struct hci_cp_write_def_link_policy cp;
1577 u16 link_policy = 0;
1578
1579 if (lmp_rswitch_capable(hdev))
1580 link_policy |= HCI_LP_RSWITCH;
1581 if (lmp_hold_capable(hdev))
1582 link_policy |= HCI_LP_HOLD;
1583 if (lmp_sniff_capable(hdev))
1584 link_policy |= HCI_LP_SNIFF;
1585 if (lmp_park_capable(hdev))
1586 link_policy |= HCI_LP_PARK;
1587
1588 cp.policy = cpu_to_le16(link_policy);
42c6b129 1589 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
1590}
1591
/* Sync the controller's LE Host Supported setting with the host-side
 * HCI_LE_ENABLED flag, sending Write LE Host Supported only when the
 * value would actually change.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	/* Only send the command if the current setting differs */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1612
/* Build and queue Set Event Mask Page 2, enabling CSB master/slave and
 * authenticated payload timeout events when the controller supports
 * the corresponding features.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1644
/* Third-stage init request: event mask, stored-link-key cleanup, link
 * policy, LE event mask/TX power/host support, and extended feature
 * pages beyond page 1.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* commands[5] bit 0x10: Write Default Link Policy supported */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1715
/* Fourth-stage init request: optional page-2 event mask, codec list,
 * MWS transport config, sync train parameters and Secure Connections
 * enablement, each gated on controller support.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (bredr_sc_enabled(hdev)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1743
/* Run the full controller initialization: stages 1-4 of the request
 * sequence, then (only during the initial HCI_SETUP phase) create the
 * debugfs entries matching the controller's capabilities.
 *
 * Returns 0 on success or the first failing stage's negative errno.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Generic entries available on every controller */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
			    &device_list_fops);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	/* BR/EDR-only entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Secure Simple Pairing entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode entries */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE entries, plus SMP registration */
	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
				    hdev, &adv_min_interval_fops);
		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
				    hdev, &adv_max_interval_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);

		smp_register(hdev);
	}

	return 0;
}
1882
/* Minimal init request for unconfigured controllers: optional reset,
 * read local version, and read BD address only when the driver can
 * change it via set_bdaddr.
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1900
1901static int __hci_unconf_init(struct hci_dev *hdev)
1902{
1903 int err;
1904
cc78b44b
MH
1905 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1906 return 0;
1907
0ebca7d6
MH
1908 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1909 if (err < 0)
1910 return err;
1911
1912 return 0;
1913}
1914
42c6b129 1915static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1916{
1917 __u8 scan = opt;
1918
42c6b129 1919 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1920
1921 /* Inquiry and Page scans */
42c6b129 1922 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1923}
1924
42c6b129 1925static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1926{
1927 __u8 auth = opt;
1928
42c6b129 1929 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1930
1931 /* Authentication */
42c6b129 1932 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1933}
1934
42c6b129 1935static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1936{
1937 __u8 encrypt = opt;
1938
42c6b129 1939 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1940
e4e8e37c 1941 /* Encryption */
42c6b129 1942 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1943}
1944
42c6b129 1945static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1946{
1947 __le16 policy = cpu_to_le16(opt);
1948
42c6b129 1949 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1950
1951 /* Default link policy */
42c6b129 1952 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1953}
1954
8e87d142 1955/* Get HCI device by index.
1da177e4
LT
1956 * Device is held on return. */
1957struct hci_dev *hci_dev_get(int index)
1958{
8035ded4 1959 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1960
1961 BT_DBG("%d", index);
1962
1963 if (index < 0)
1964 return NULL;
1965
1966 read_lock(&hci_dev_list_lock);
8035ded4 1967 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1968 if (d->id == index) {
1969 hdev = hci_dev_hold(d);
1970 break;
1971 }
1972 }
1973 read_unlock(&hci_dev_list_lock);
1974 return hdev;
1975}
1da177e4
LT
1976
1977/* ---- Inquiry support ---- */
ff9ef578 1978
30dc78e1
JH
1979bool hci_discovery_active(struct hci_dev *hdev)
1980{
1981 struct discovery_state *discov = &hdev->discovery;
1982
6fbe195d 1983 switch (discov->state) {
343f935b 1984 case DISCOVERY_FINDING:
6fbe195d 1985 case DISCOVERY_RESOLVING:
30dc78e1
JH
1986 return true;
1987
6fbe195d
AG
1988 default:
1989 return false;
1990 }
30dc78e1
JH
1991}
1992
/* Transition the discovery state machine and emit the corresponding
 * mgmt "discovering" events. A transition to the same state is a no-op.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		/* STARTING -> STOPPED means discovery never began, so no
		 * "discovering stopped" event is sent in that case.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
2022
1f9b9a5d 2023void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 2024{
30883512 2025 struct discovery_state *cache = &hdev->discovery;
b57c1a56 2026 struct inquiry_entry *p, *n;
1da177e4 2027
561aafbc
JH
2028 list_for_each_entry_safe(p, n, &cache->all, all) {
2029 list_del(&p->all);
b57c1a56 2030 kfree(p);
1da177e4 2031 }
561aafbc
JH
2032
2033 INIT_LIST_HEAD(&cache->unknown);
2034 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
2035}
2036
a8c5fb1a
GP
2037struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2038 bdaddr_t *bdaddr)
1da177e4 2039{
30883512 2040 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2041 struct inquiry_entry *e;
2042
6ed93dc6 2043 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 2044
561aafbc
JH
2045 list_for_each_entry(e, &cache->all, all) {
2046 if (!bacmp(&e->data.bdaddr, bdaddr))
2047 return e;
2048 }
2049
2050 return NULL;
2051}
2052
2053struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 2054 bdaddr_t *bdaddr)
561aafbc 2055{
30883512 2056 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
2057 struct inquiry_entry *e;
2058
6ed93dc6 2059 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
2060
2061 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 2062 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
2063 return e;
2064 }
2065
2066 return NULL;
1da177e4
LT
2067}
2068
30dc78e1 2069struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
2070 bdaddr_t *bdaddr,
2071 int state)
30dc78e1
JH
2072{
2073 struct discovery_state *cache = &hdev->discovery;
2074 struct inquiry_entry *e;
2075
6ed93dc6 2076 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
2077
2078 list_for_each_entry(e, &cache->resolve, list) {
2079 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2080 return e;
2081 if (!bacmp(&e->data.bdaddr, bdaddr))
2082 return e;
2083 }
2084
2085 return NULL;
2086}
2087
/* Re-insert @ie into the name-resolve list keeping it sorted by
 * descending signal strength (smaller |rssi| first), while leaving
 * entries whose resolution is already pending at the front.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink first; the walk below finds the new insertion point */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
2106
af58925c
MH
/* Add or refresh an inquiry cache entry for a discovered device.
 *
 * @data:       inquiry result to store
 * @name_known: whether the remote name is already known
 *
 * Returns MGMT_DEV_FOUND_* flags describing the entry
 * (legacy-pairing device, name confirmation needed).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry response invalidates stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* RSSI changed while a name request is still needed:
		 * re-sort the resolve list accordingly.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		/* Allocation failed: still ask userspace to confirm */
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote to NAME_KNOWN and take the entry off its
	 * unknown/resolve list (but never interrupt NAME_PENDING).
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
2168
2169static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2170{
30883512 2171 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2172 struct inquiry_info *info = (struct inquiry_info *) buf;
2173 struct inquiry_entry *e;
2174 int copied = 0;
2175
561aafbc 2176 list_for_each_entry(e, &cache->all, all) {
1da177e4 2177 struct inquiry_data *data = &e->data;
b57c1a56
JH
2178
2179 if (copied >= num)
2180 break;
2181
1da177e4
LT
2182 bacpy(&info->bdaddr, &data->bdaddr);
2183 info->pscan_rep_mode = data->pscan_rep_mode;
2184 info->pscan_period_mode = data->pscan_period_mode;
2185 info->pscan_mode = data->pscan_mode;
2186 memcpy(info->dev_class, data->dev_class, 3);
2187 info->clock_offset = data->clock_offset;
b57c1a56 2188
1da177e4 2189 info++;
b57c1a56 2190 copied++;
1da177e4
LT
2191 }
2192
2193 BT_DBG("cache %p, copied %d", cache, copied);
2194 return copied;
2195}
2196
42c6b129 2197static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2198{
2199 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2200 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2201 struct hci_cp_inquiry cp;
2202
2203 BT_DBG("%s", hdev->name);
2204
2205 if (test_bit(HCI_INQUIRY, &hdev->flags))
2206 return;
2207
2208 /* Start Inquiry */
2209 memcpy(&cp.lap, &ir->lap, 3);
2210 cp.length = ir->length;
2211 cp.num_rsp = ir->num_rsp;
42c6b129 2212 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2213}
2214
2215int hci_inquiry(void __user *arg)
2216{
2217 __u8 __user *ptr = arg;
2218 struct hci_inquiry_req ir;
2219 struct hci_dev *hdev;
2220 int err = 0, do_inquiry = 0, max_rsp;
2221 long timeo;
2222 __u8 *buf;
2223
2224 if (copy_from_user(&ir, ptr, sizeof(ir)))
2225 return -EFAULT;
2226
5a08ecce
AE
2227 hdev = hci_dev_get(ir.dev_id);
2228 if (!hdev)
1da177e4
LT
2229 return -ENODEV;
2230
0736cfa8
MH
2231 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2232 err = -EBUSY;
2233 goto done;
2234 }
2235
4a964404 2236 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2237 err = -EOPNOTSUPP;
2238 goto done;
2239 }
2240
5b69bef5
MH
2241 if (hdev->dev_type != HCI_BREDR) {
2242 err = -EOPNOTSUPP;
2243 goto done;
2244 }
2245
56f87901
JH
2246 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2247 err = -EOPNOTSUPP;
2248 goto done;
2249 }
2250
09fd0de5 2251 hci_dev_lock(hdev);
8e87d142 2252 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2253 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2254 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2255 do_inquiry = 1;
2256 }
09fd0de5 2257 hci_dev_unlock(hdev);
1da177e4 2258
04837f64 2259 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2260
2261 if (do_inquiry) {
01178cd4
JH
2262 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2263 timeo);
70f23020
AE
2264 if (err < 0)
2265 goto done;
3e13fa1e
AG
2266
2267 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2268 * cleared). If it is interrupted by a signal, return -EINTR.
2269 */
74316201 2270 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
3e13fa1e
AG
2271 TASK_INTERRUPTIBLE))
2272 return -EINTR;
70f23020 2273 }
1da177e4 2274
8fc9ced3
GP
2275 /* for unlimited number of responses we will use buffer with
2276 * 255 entries
2277 */
1da177e4
LT
2278 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2279
2280 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2281 * copy it to the user space.
2282 */
01df8c31 2283 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2284 if (!buf) {
1da177e4
LT
2285 err = -ENOMEM;
2286 goto done;
2287 }
2288
09fd0de5 2289 hci_dev_lock(hdev);
1da177e4 2290 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2291 hci_dev_unlock(hdev);
1da177e4
LT
2292
2293 BT_DBG("num_rsp %d", ir.num_rsp);
2294
2295 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2296 ptr += sizeof(ir);
2297 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2298 ir.num_rsp))
1da177e4 2299 err = -EFAULT;
8e87d142 2300 } else
1da177e4
LT
2301 err = -EFAULT;
2302
2303 kfree(buf);
2304
2305done:
2306 hci_dev_put(hdev);
2307 return err;
2308}
2309
/* Power on an HCI device: run driver open/setup callbacks, perform the
 * HCI init sequence and, on success, mark the device HCI_UP and notify
 * the management interface. On init failure, everything brought up so
 * far is torn down again.
 *
 * Returns 0 on success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Driver transport open callback */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		/* Full HCI init only for configured, non-user-channel use */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		/* Keep only the HCI_RAW bit across the failed open */
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2453
cbed0ca1
JH
/* ---- HCI ioctl helpers ---- */

/* Power on a device by index (HCIDEVUP ioctl entry point).
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2510
d7347f3c
JH
/* Drop every pending LE connection action and release any connection
 * reference still held by the parameter entries.
 *
 * This function requires the caller holds hdev->lock.
 */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			/* Release both the connection and the reference
			 * the params entry held on it.
			 */
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}
2527
1da177e4
LT
/* Power off an HCI device: stop all deferred work, flush queues and
 * connections, optionally issue HCI Reset, close the driver transport
 * and clear all volatile state. Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2635
/* Power off a device by index (HCIDEVDOWN ioctl entry point).
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* User-channel devices are controlled exclusively by their owner */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* An explicit close supersedes any pending auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2659
/* Handle the HCIDEVRESET ioctl: flush queues, caches and connections,
 * then issue an HCI Reset to the controller.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters before re-initializing */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2713
2714int hci_dev_reset_stat(__u16 dev)
2715{
2716 struct hci_dev *hdev;
2717 int ret = 0;
2718
70f23020
AE
2719 hdev = hci_dev_get(dev);
2720 if (!hdev)
1da177e4
LT
2721 return -ENODEV;
2722
0736cfa8
MH
2723 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2724 ret = -EBUSY;
2725 goto done;
2726 }
2727
4a964404 2728 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2729 ret = -EOPNOTSUPP;
2730 goto done;
2731 }
2732
1da177e4
LT
2733 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2734
0736cfa8 2735done:
1da177e4 2736 hci_dev_put(hdev);
1da177e4
LT
2737 return ret;
2738}
2739
123abc08
JH
/* Sync the HCI_CONNECTABLE/HCI_DISCOVERABLE flags with a scan mode
 * that was changed via legacy ioctl, and notify the management
 * interface when anything actually changed.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
	else
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
						   &hdev->dev_flags);
	} else {
		/* Limited discoverable cannot survive without inquiry scan */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}
2775
1da177e4
LT
/* Dispatch legacy HCISET* ioctls (auth, encrypt, scan mode, link
 * policy/mode, packet type, ACL/SCO MTU) for a BR/EDR device.
 *
 * @cmd: ioctl number
 * @arg: user pointer to a struct hci_dev_req
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high half, packet count low */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2877
/* Handle the HCIGETDEVLIST ioctl: copy id/flags pairs for up to the
 * requested number of registered devices back to user space.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Sanity-bound the requested count before sizing the buffer */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2927
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info with the
 * device's address, flags, MTUs and statistics and copy it back to
 * user space.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controller: report LE buffer info as ACL */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2980
/* ---- Interface to HCI drivers ---- */

/* rfkill callback: block powers the device down (unless setup or
 * config is still in progress), unblock only clears the flag —
 * powering back up is left to userspace.
 *
 * Returns 0, or -EBUSY for user-channel devices which rfkill must
 * not touch.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
3007
ab81cbf9
JH
/* Deferred power-on work: open the device and perform the post-setup
 * checks and mgmt notifications that could not be done while setup
 * was still in progress.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
3066
/* Deferred power-off work: simply close the device. Queued e.g. by
 * the auto-off timeout in hci_power_on().
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
3076
16ab91ab
JH
3077static void hci_discov_off(struct work_struct *work)
3078{
3079 struct hci_dev *hdev;
16ab91ab
JH
3080
3081 hdev = container_of(work, struct hci_dev, discov_off.work);
3082
3083 BT_DBG("%s", hdev->name);
3084
d1967ff8 3085 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
3086}
3087
35f7498a 3088void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 3089{
4821002c 3090 struct bt_uuid *uuid, *tmp;
2aeb9a1a 3091
4821002c
JH
3092 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3093 list_del(&uuid->list);
2aeb9a1a
JH
3094 kfree(uuid);
3095 }
2aeb9a1a
JH
3096}
3097
/* Remove and free every stored BR/EDR link key.
 *
 * Entries are unlinked with list_del_rcu() and freed via kfree_rcu()
 * so concurrent RCU readers (e.g. hci_find_link_key) never touch
 * freed memory. NOTE(review): assumes writers are serialized by the
 * caller, presumably via hdev->lock — confirm at call sites.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
3107
/* Remove and free every stored SMP long term key; frees are deferred
 * through an RCU grace period for the benefit of lockless readers.
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
3117
970c4e46
JH
/* Remove and free every stored SMP identity resolving key; frees are
 * deferred through an RCU grace period for lockless readers.
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
3127
55ed8ca1
JH
3128struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3129{
8035ded4 3130 struct link_key *k;
55ed8ca1 3131
0378b597
JH
3132 rcu_read_lock();
3133 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3134 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3135 rcu_read_unlock();
55ed8ca1 3136 return k;
0378b597
JH
3137 }
3138 }
3139 rcu_read_unlock();
55ed8ca1
JH
3140
3141 return NULL;
3142}
3143
/* Decide whether a newly created link key should be stored
 * persistently, based on its type and the bonding requirements both
 * sides advertised during authentication. Returns true when the key
 * should survive the connection.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
3179
e804d25d 3180static u8 ltk_role(u8 type)
98a0b845 3181{
e804d25d
JH
3182 if (type == SMP_LTK)
3183 return HCI_ROLE_MASTER;
98a0b845 3184
e804d25d 3185 return HCI_ROLE_SLAVE;
98a0b845
JH
3186}
3187
c9839a11 3188struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
e804d25d 3189 u8 addr_type, u8 role)
75d262c2 3190{
c9839a11 3191 struct smp_ltk *k;
75d262c2 3192
970d0f1b
JH
3193 rcu_read_lock();
3194 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
5378bc56
JH
3195 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3196 continue;
3197
3198 if (smp_ltk_is_sc(k)) {
3199 if (k->type == SMP_LTK_P256_DEBUG &&
3200 !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
3201 continue;
3202 rcu_read_unlock();
3203 return k;
3204 }
3205
3206 if (ltk_role(k->type) == role) {
970d0f1b 3207 rcu_read_unlock();
75d262c2 3208 return k;
970d0f1b
JH
3209 }
3210 }
3211 rcu_read_unlock();
75d262c2
VCG
3212
3213 return NULL;
3214}
75d262c2 3215
970c4e46
JH
3216struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3217{
3218 struct smp_irk *irk;
3219
adae20cb
JH
3220 rcu_read_lock();
3221 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3222 if (!bacmp(&irk->rpa, rpa)) {
3223 rcu_read_unlock();
970c4e46 3224 return irk;
adae20cb 3225 }
970c4e46
JH
3226 }
3227
adae20cb 3228 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 3229 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 3230 bacpy(&irk->rpa, rpa);
adae20cb 3231 rcu_read_unlock();
970c4e46
JH
3232 return irk;
3233 }
3234 }
adae20cb 3235 rcu_read_unlock();
970c4e46
JH
3236
3237 return NULL;
3238}
3239
3240struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3241 u8 addr_type)
3242{
3243 struct smp_irk *irk;
3244
6cfc9988
JH
3245 /* Identity Address must be public or static random */
3246 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3247 return NULL;
3248
adae20cb
JH
3249 rcu_read_lock();
3250 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 3251 if (addr_type == irk->addr_type &&
adae20cb
JH
3252 bacmp(bdaddr, &irk->bdaddr) == 0) {
3253 rcu_read_unlock();
970c4e46 3254 return irk;
adae20cb 3255 }
970c4e46 3256 }
adae20cb 3257 rcu_read_unlock();
970c4e46
JH
3258
3259 return NULL;
3260}
3261
567fa2aa 3262struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
3263 bdaddr_t *bdaddr, u8 *val, u8 type,
3264 u8 pin_len, bool *persistent)
55ed8ca1
JH
3265{
3266 struct link_key *key, *old_key;
745c0ce3 3267 u8 old_key_type;
55ed8ca1
JH
3268
3269 old_key = hci_find_link_key(hdev, bdaddr);
3270 if (old_key) {
3271 old_key_type = old_key->type;
3272 key = old_key;
3273 } else {
12adcf3a 3274 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 3275 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 3276 if (!key)
567fa2aa 3277 return NULL;
0378b597 3278 list_add_rcu(&key->list, &hdev->link_keys);
55ed8ca1
JH
3279 }
3280
6ed93dc6 3281 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 3282
d25e28ab
JH
3283 /* Some buggy controller combinations generate a changed
3284 * combination key for legacy pairing even when there's no
3285 * previous key */
3286 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 3287 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 3288 type = HCI_LK_COMBINATION;
655fe6ec
JH
3289 if (conn)
3290 conn->key_type = type;
3291 }
d25e28ab 3292
55ed8ca1 3293 bacpy(&key->bdaddr, bdaddr);
9b3b4460 3294 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
3295 key->pin_len = pin_len;
3296
b6020ba0 3297 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 3298 key->type = old_key_type;
4748fed2
JH
3299 else
3300 key->type = type;
3301
7652ff6a
JH
3302 if (persistent)
3303 *persistent = hci_persistent_key(hdev, conn, type,
3304 old_key_type);
4df378a1 3305
567fa2aa 3306 return key;
55ed8ca1
JH
3307}
3308
ca9142b8 3309struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 3310 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 3311 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 3312{
c9839a11 3313 struct smp_ltk *key, *old_key;
e804d25d 3314 u8 role = ltk_role(type);
75d262c2 3315
e804d25d 3316 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
c9839a11 3317 if (old_key)
75d262c2 3318 key = old_key;
c9839a11 3319 else {
0a14ab41 3320 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3321 if (!key)
ca9142b8 3322 return NULL;
970d0f1b 3323 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
3324 }
3325
75d262c2 3326 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
3327 key->bdaddr_type = addr_type;
3328 memcpy(key->val, tk, sizeof(key->val));
3329 key->authenticated = authenticated;
3330 key->ediv = ediv;
fe39c7b2 3331 key->rand = rand;
c9839a11
VCG
3332 key->enc_size = enc_size;
3333 key->type = type;
75d262c2 3334
ca9142b8 3335 return key;
75d262c2
VCG
3336}
3337
ca9142b8
JH
3338struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3339 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
3340{
3341 struct smp_irk *irk;
3342
3343 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3344 if (!irk) {
3345 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3346 if (!irk)
ca9142b8 3347 return NULL;
970c4e46
JH
3348
3349 bacpy(&irk->bdaddr, bdaddr);
3350 irk->addr_type = addr_type;
3351
adae20cb 3352 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
3353 }
3354
3355 memcpy(irk->val, val, 16);
3356 bacpy(&irk->rpa, rpa);
3357
ca9142b8 3358 return irk;
970c4e46
JH
3359}
3360
55ed8ca1
JH
3361int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3362{
3363 struct link_key *key;
3364
3365 key = hci_find_link_key(hdev, bdaddr);
3366 if (!key)
3367 return -ENOENT;
3368
6ed93dc6 3369 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 3370
0378b597
JH
3371 list_del_rcu(&key->list);
3372 kfree_rcu(key, rcu);
55ed8ca1
JH
3373
3374 return 0;
3375}
3376
e0b2b27e 3377int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 3378{
970d0f1b 3379 struct smp_ltk *k;
c51ffa0b 3380 int removed = 0;
b899efaf 3381
970d0f1b 3382 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 3383 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3384 continue;
3385
6ed93dc6 3386 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 3387
970d0f1b
JH
3388 list_del_rcu(&k->list);
3389 kfree_rcu(k, rcu);
c51ffa0b 3390 removed++;
b899efaf
VCG
3391 }
3392
c51ffa0b 3393 return removed ? 0 : -ENOENT;
b899efaf
VCG
3394}
3395
a7ec7338
JH
3396void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3397{
adae20cb 3398 struct smp_irk *k;
a7ec7338 3399
adae20cb 3400 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3401 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3402 continue;
3403
3404 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3405
adae20cb
JH
3406 list_del_rcu(&k->list);
3407 kfree_rcu(k, rcu);
a7ec7338
JH
3408 }
3409}
3410
6bd32326 3411/* HCI command timer function */
65cc2b49 3412static void hci_cmd_timeout(struct work_struct *work)
6bd32326 3413{
65cc2b49
MH
3414 struct hci_dev *hdev = container_of(work, struct hci_dev,
3415 cmd_timer.work);
6bd32326 3416
bda4f23a
AE
3417 if (hdev->sent_cmd) {
3418 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3419 u16 opcode = __le16_to_cpu(sent->opcode);
3420
3421 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3422 } else {
3423 BT_ERR("%s command tx timeout", hdev->name);
3424 }
3425
6bd32326 3426 atomic_set(&hdev->cmd_cnt, 1);
c347b765 3427 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
3428}
3429
2763eda6 3430struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3431 bdaddr_t *bdaddr)
2763eda6
SJ
3432{
3433 struct oob_data *data;
3434
3435 list_for_each_entry(data, &hdev->remote_oob_data, list)
3436 if (bacmp(bdaddr, &data->bdaddr) == 0)
3437 return data;
3438
3439 return NULL;
3440}
3441
3442int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3443{
3444 struct oob_data *data;
3445
3446 data = hci_find_remote_oob_data(hdev, bdaddr);
3447 if (!data)
3448 return -ENOENT;
3449
6ed93dc6 3450 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3451
3452 list_del(&data->list);
3453 kfree(data);
3454
3455 return 0;
3456}
3457
35f7498a 3458void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3459{
3460 struct oob_data *data, *n;
3461
3462 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3463 list_del(&data->list);
3464 kfree(data);
3465 }
2763eda6
SJ
3466}
3467
0798872e 3468int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
38da1703 3469 u8 *hash, u8 *rand)
2763eda6
SJ
3470{
3471 struct oob_data *data;
3472
3473 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3474 if (!data) {
0a14ab41 3475 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3476 if (!data)
3477 return -ENOMEM;
3478
3479 bacpy(&data->bdaddr, bdaddr);
3480 list_add(&data->list, &hdev->remote_oob_data);
3481 }
3482
519ca9d0 3483 memcpy(data->hash192, hash, sizeof(data->hash192));
38da1703 3484 memcpy(data->rand192, rand, sizeof(data->rand192));
2763eda6 3485
0798872e 3486 memset(data->hash256, 0, sizeof(data->hash256));
38da1703 3487 memset(data->rand256, 0, sizeof(data->rand256));
0798872e
MH
3488
3489 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3490
3491 return 0;
3492}
3493
3494int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
38da1703
JH
3495 u8 *hash192, u8 *rand192,
3496 u8 *hash256, u8 *rand256)
0798872e
MH
3497{
3498 struct oob_data *data;
3499
3500 data = hci_find_remote_oob_data(hdev, bdaddr);
3501 if (!data) {
0a14ab41 3502 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3503 if (!data)
3504 return -ENOMEM;
3505
3506 bacpy(&data->bdaddr, bdaddr);
3507 list_add(&data->list, &hdev->remote_oob_data);
3508 }
3509
3510 memcpy(data->hash192, hash192, sizeof(data->hash192));
38da1703 3511 memcpy(data->rand192, rand192, sizeof(data->rand192));
0798872e
MH
3512
3513 memcpy(data->hash256, hash256, sizeof(data->hash256));
38da1703 3514 memcpy(data->rand256, rand256, sizeof(data->rand256));
0798872e 3515
6ed93dc6 3516 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3517
3518 return 0;
3519}
3520
dcc36c16 3521struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 3522 bdaddr_t *bdaddr, u8 type)
b2a66aad 3523{
8035ded4 3524 struct bdaddr_list *b;
b2a66aad 3525
dcc36c16 3526 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 3527 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3528 return b;
b9ee0a78 3529 }
b2a66aad
AJ
3530
3531 return NULL;
3532}
3533
dcc36c16 3534void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
3535{
3536 struct list_head *p, *n;
3537
dcc36c16 3538 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 3539 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3540
3541 list_del(p);
3542 kfree(b);
3543 }
b2a66aad
AJ
3544}
3545
dcc36c16 3546int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3547{
3548 struct bdaddr_list *entry;
b2a66aad 3549
b9ee0a78 3550 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3551 return -EBADF;
3552
dcc36c16 3553 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 3554 return -EEXIST;
b2a66aad 3555
27f70f3e 3556 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
3557 if (!entry)
3558 return -ENOMEM;
b2a66aad
AJ
3559
3560 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3561 entry->bdaddr_type = type;
b2a66aad 3562
dcc36c16 3563 list_add(&entry->list, list);
b2a66aad 3564
2a8357f2 3565 return 0;
b2a66aad
AJ
3566}
3567
dcc36c16 3568int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3569{
3570 struct bdaddr_list *entry;
b2a66aad 3571
35f7498a 3572 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 3573 hci_bdaddr_list_clear(list);
35f7498a
JH
3574 return 0;
3575 }
b2a66aad 3576
dcc36c16 3577 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
3578 if (!entry)
3579 return -ENOENT;
3580
3581 list_del(&entry->list);
3582 kfree(entry);
3583
3584 return 0;
3585}
3586
15819a70
AG
3587/* This function requires the caller holds hdev->lock */
3588struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3589 bdaddr_t *addr, u8 addr_type)
3590{
3591 struct hci_conn_params *params;
3592
738f6185
JH
3593 /* The conn params list only contains identity addresses */
3594 if (!hci_is_identity_address(addr, addr_type))
3595 return NULL;
3596
15819a70
AG
3597 list_for_each_entry(params, &hdev->le_conn_params, list) {
3598 if (bacmp(&params->addr, addr) == 0 &&
3599 params->addr_type == addr_type) {
3600 return params;
3601 }
3602 }
3603
3604 return NULL;
3605}
3606
cef952ce
AG
3607static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3608{
3609 struct hci_conn *conn;
3610
3611 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3612 if (!conn)
3613 return false;
3614
3615 if (conn->dst_type != type)
3616 return false;
3617
3618 if (conn->state != BT_CONNECTED)
3619 return false;
3620
3621 return true;
3622}
3623
4b10966f 3624/* This function requires the caller holds hdev->lock */
501f8827
JH
3625struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3626 bdaddr_t *addr, u8 addr_type)
a9b0a04c 3627{
912b42ef 3628 struct hci_conn_params *param;
a9b0a04c 3629
738f6185
JH
3630 /* The list only contains identity addresses */
3631 if (!hci_is_identity_address(addr, addr_type))
3632 return NULL;
a9b0a04c 3633
501f8827 3634 list_for_each_entry(param, list, action) {
912b42ef
JH
3635 if (bacmp(&param->addr, addr) == 0 &&
3636 param->addr_type == addr_type)
3637 return param;
4b10966f
MH
3638 }
3639
3640 return NULL;
a9b0a04c
AG
3641}
3642
15819a70 3643/* This function requires the caller holds hdev->lock */
51d167c0
MH
3644struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3645 bdaddr_t *addr, u8 addr_type)
15819a70
AG
3646{
3647 struct hci_conn_params *params;
3648
c46245b3 3649 if (!hci_is_identity_address(addr, addr_type))
51d167c0 3650 return NULL;
a9b0a04c 3651
15819a70 3652 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 3653 if (params)
51d167c0 3654 return params;
15819a70
AG
3655
3656 params = kzalloc(sizeof(*params), GFP_KERNEL);
3657 if (!params) {
3658 BT_ERR("Out of memory");
51d167c0 3659 return NULL;
15819a70
AG
3660 }
3661
3662 bacpy(&params->addr, addr);
3663 params->addr_type = addr_type;
cef952ce
AG
3664
3665 list_add(&params->list, &hdev->le_conn_params);
93450c75 3666 INIT_LIST_HEAD(&params->action);
cef952ce 3667
bf5b3c8b
MH
3668 params->conn_min_interval = hdev->le_conn_min_interval;
3669 params->conn_max_interval = hdev->le_conn_max_interval;
3670 params->conn_latency = hdev->le_conn_latency;
3671 params->supervision_timeout = hdev->le_supv_timeout;
3672 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3673
3674 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3675
51d167c0 3676 return params;
bf5b3c8b
MH
3677}
3678
3679/* This function requires the caller holds hdev->lock */
3680int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
d06b50ce 3681 u8 auto_connect)
15819a70
AG
3682{
3683 struct hci_conn_params *params;
3684
8c87aae1
MH
3685 params = hci_conn_params_add(hdev, addr, addr_type);
3686 if (!params)
3687 return -EIO;
cef952ce 3688
42ce26de
JH
3689 if (params->auto_connect == auto_connect)
3690 return 0;
3691
95305baa 3692 list_del_init(&params->action);
15819a70 3693
cef952ce
AG
3694 switch (auto_connect) {
3695 case HCI_AUTO_CONN_DISABLED:
3696 case HCI_AUTO_CONN_LINK_LOSS:
95305baa 3697 hci_update_background_scan(hdev);
cef952ce 3698 break;
851efca8 3699 case HCI_AUTO_CONN_REPORT:
95305baa
JH
3700 list_add(&params->action, &hdev->pend_le_reports);
3701 hci_update_background_scan(hdev);
cef952ce 3702 break;
4b9e7e75 3703 case HCI_AUTO_CONN_DIRECT:
cef952ce 3704 case HCI_AUTO_CONN_ALWAYS:
95305baa
JH
3705 if (!is_connected(hdev, addr, addr_type)) {
3706 list_add(&params->action, &hdev->pend_le_conns);
3707 hci_update_background_scan(hdev);
3708 }
cef952ce
AG
3709 break;
3710 }
15819a70 3711
851efca8
JH
3712 params->auto_connect = auto_connect;
3713
d06b50ce
MH
3714 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3715 auto_connect);
a9b0a04c
AG
3716
3717 return 0;
15819a70
AG
3718}
3719
f6c63249 3720static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 3721{
f8aaf9b6 3722 if (params->conn) {
f161dd41 3723 hci_conn_drop(params->conn);
f8aaf9b6
JH
3724 hci_conn_put(params->conn);
3725 }
f161dd41 3726
95305baa 3727 list_del(&params->action);
15819a70
AG
3728 list_del(&params->list);
3729 kfree(params);
f6c63249
JH
3730}
3731
3732/* This function requires the caller holds hdev->lock */
3733void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3734{
3735 struct hci_conn_params *params;
3736
3737 params = hci_conn_params_lookup(hdev, addr, addr_type);
3738 if (!params)
3739 return;
3740
3741 hci_conn_params_free(params);
15819a70 3742
95305baa
JH
3743 hci_update_background_scan(hdev);
3744
15819a70
AG
3745 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3746}
3747
3748/* This function requires the caller holds hdev->lock */
55af49a8 3749void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
3750{
3751 struct hci_conn_params *params, *tmp;
3752
3753 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
3754 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3755 continue;
15819a70
AG
3756 list_del(&params->list);
3757 kfree(params);
3758 }
3759
55af49a8 3760 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
3761}
3762
3763/* This function requires the caller holds hdev->lock */
373110c5 3764void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 3765{
15819a70 3766 struct hci_conn_params *params, *tmp;
77a77a30 3767
f6c63249
JH
3768 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3769 hci_conn_params_free(params);
77a77a30 3770
a4790dbd 3771 hci_update_background_scan(hdev);
77a77a30 3772
15819a70 3773 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
3774}
3775
4c87eaab 3776static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3777{
4c87eaab
AG
3778 if (status) {
3779 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3780
4c87eaab
AG
3781 hci_dev_lock(hdev);
3782 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3783 hci_dev_unlock(hdev);
3784 return;
3785 }
7ba8b4be
AG
3786}
3787
4c87eaab 3788static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3789{
4c87eaab
AG
3790 /* General inquiry access code (GIAC) */
3791 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3792 struct hci_request req;
3793 struct hci_cp_inquiry cp;
7ba8b4be
AG
3794 int err;
3795
4c87eaab
AG
3796 if (status) {
3797 BT_ERR("Failed to disable LE scanning: status %d", status);
3798 return;
3799 }
7ba8b4be 3800
4c87eaab
AG
3801 switch (hdev->discovery.type) {
3802 case DISCOV_TYPE_LE:
3803 hci_dev_lock(hdev);
3804 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3805 hci_dev_unlock(hdev);
3806 break;
7ba8b4be 3807
4c87eaab
AG
3808 case DISCOV_TYPE_INTERLEAVED:
3809 hci_req_init(&req, hdev);
7ba8b4be 3810
4c87eaab
AG
3811 memset(&cp, 0, sizeof(cp));
3812 memcpy(&cp.lap, lap, sizeof(cp.lap));
3813 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3814 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3815
4c87eaab 3816 hci_dev_lock(hdev);
7dbfac1d 3817
4c87eaab 3818 hci_inquiry_cache_flush(hdev);
7dbfac1d 3819
4c87eaab
AG
3820 err = hci_req_run(&req, inquiry_complete);
3821 if (err) {
3822 BT_ERR("Inquiry request failed: err %d", err);
3823 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3824 }
7dbfac1d 3825
4c87eaab
AG
3826 hci_dev_unlock(hdev);
3827 break;
7dbfac1d 3828 }
7dbfac1d
AG
3829}
3830
7ba8b4be
AG
3831static void le_scan_disable_work(struct work_struct *work)
3832{
3833 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3834 le_scan_disable.work);
4c87eaab
AG
3835 struct hci_request req;
3836 int err;
7ba8b4be
AG
3837
3838 BT_DBG("%s", hdev->name);
3839
4c87eaab 3840 hci_req_init(&req, hdev);
28b75a89 3841
b1efcc28 3842 hci_req_add_le_scan_disable(&req);
28b75a89 3843
4c87eaab
AG
3844 err = hci_req_run(&req, le_scan_disable_work_complete);
3845 if (err)
3846 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3847}
3848
8d97250e
JH
3849static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3850{
3851 struct hci_dev *hdev = req->hdev;
3852
3853 /* If we're advertising or initiating an LE connection we can't
3854 * go ahead and change the random address at this time. This is
3855 * because the eventual initiator address used for the
3856 * subsequently created connection will be undefined (some
3857 * controllers use the new address and others the one we had
3858 * when the operation started).
3859 *
3860 * In this kind of scenario skip the update and let the random
3861 * address be updated at the next cycle.
3862 */
5ce194c4 3863 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
8d97250e
JH
3864 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3865 BT_DBG("Deferring random address update");
9a783a13 3866 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
8d97250e
JH
3867 return;
3868 }
3869
3870 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3871}
3872
94b1fc92
MH
3873int hci_update_random_address(struct hci_request *req, bool require_privacy,
3874 u8 *own_addr_type)
ebd3a747
JH
3875{
3876 struct hci_dev *hdev = req->hdev;
3877 int err;
3878
3879 /* If privacy is enabled use a resolvable private address. If
2b5224dc
MH
3880 * current RPA has expired or there is something else than
3881 * the current RPA in use, then generate a new one.
ebd3a747
JH
3882 */
3883 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3884 int to;
3885
3886 *own_addr_type = ADDR_LE_DEV_RANDOM;
3887
3888 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3889 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3890 return 0;
3891
defce9e8 3892 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
ebd3a747
JH
3893 if (err < 0) {
3894 BT_ERR("%s failed to generate new RPA", hdev->name);
3895 return err;
3896 }
3897
8d97250e 3898 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3899
3900 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3901 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3902
3903 return 0;
94b1fc92
MH
3904 }
3905
3906 /* In case of required privacy without resolvable private address,
3907 * use an unresolvable private address. This is useful for active
3908 * scanning and non-connectable advertising.
3909 */
3910 if (require_privacy) {
3911 bdaddr_t urpa;
3912
3913 get_random_bytes(&urpa, 6);
3914 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3915
3916 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3917 set_random_addr(req, &urpa);
94b1fc92 3918 return 0;
ebd3a747
JH
3919 }
3920
3921 /* If forcing static address is in use or there is no public
3922 * address use the static address as random address (but skip
3923 * the HCI command if the current random address is already the
3924 * static one.
3925 */
111902f7 3926 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3927 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3928 *own_addr_type = ADDR_LE_DEV_RANDOM;
3929 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3930 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3931 &hdev->static_addr);
3932 return 0;
3933 }
3934
3935 /* Neither privacy nor static address is being used so use a
3936 * public address.
3937 */
3938 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3939
3940 return 0;
3941}
3942
a1f4c318
JH
3943/* Copy the Identity Address of the controller.
3944 *
3945 * If the controller has a public BD_ADDR, then by default use that one.
3946 * If this is a LE only controller without a public address, default to
3947 * the static random address.
3948 *
3949 * For debugging purposes it is possible to force controllers with a
3950 * public address to use the static random address instead.
3951 */
3952void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3953 u8 *bdaddr_type)
3954{
111902f7 3955 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3956 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3957 bacpy(bdaddr, &hdev->static_addr);
3958 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3959 } else {
3960 bacpy(bdaddr, &hdev->bdaddr);
3961 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3962 }
3963}
3964
9be0dab7
DH
3965/* Alloc HCI device */
3966struct hci_dev *hci_alloc_dev(void)
3967{
3968 struct hci_dev *hdev;
3969
27f70f3e 3970 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
3971 if (!hdev)
3972 return NULL;
3973
b1b813d4
DH
3974 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3975 hdev->esco_type = (ESCO_HV1);
3976 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3977 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3978 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3979 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3980 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3981 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3982
b1b813d4
DH
3983 hdev->sniff_max_interval = 800;
3984 hdev->sniff_min_interval = 80;
3985
3f959d46 3986 hdev->le_adv_channel_map = 0x07;
628531c9
GL
3987 hdev->le_adv_min_interval = 0x0800;
3988 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
3989 hdev->le_scan_interval = 0x0060;
3990 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3991 hdev->le_conn_min_interval = 0x0028;
3992 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3993 hdev->le_conn_latency = 0x0000;
3994 hdev->le_supv_timeout = 0x002a;
bef64738 3995
d6bfd59c 3996 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3997 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3998 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3999 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 4000
b1b813d4
DH
4001 mutex_init(&hdev->lock);
4002 mutex_init(&hdev->req_lock);
4003
4004 INIT_LIST_HEAD(&hdev->mgmt_pending);
4005 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 4006 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
4007 INIT_LIST_HEAD(&hdev->uuids);
4008 INIT_LIST_HEAD(&hdev->link_keys);
4009 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 4010 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 4011 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 4012 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 4013 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 4014 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 4015 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 4016 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
4017
4018 INIT_WORK(&hdev->rx_work, hci_rx_work);
4019 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4020 INIT_WORK(&hdev->tx_work, hci_tx_work);
4021 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 4022
b1b813d4
DH
4023 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4024 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4025 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4026
b1b813d4
DH
4027 skb_queue_head_init(&hdev->rx_q);
4028 skb_queue_head_init(&hdev->cmd_q);
4029 skb_queue_head_init(&hdev->raw_q);
4030
4031 init_waitqueue_head(&hdev->req_wait_q);
4032
65cc2b49 4033 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 4034
b1b813d4
DH
4035 hci_init_sysfs(hdev);
4036 discovery_init(hdev);
9be0dab7
DH
4037
4038 return hdev;
4039}
4040EXPORT_SYMBOL(hci_alloc_dev);
4041
4042/* Free HCI device */
4043void hci_free_dev(struct hci_dev *hdev)
4044{
9be0dab7
DH
4045 /* will free via device release */
4046 put_device(&hdev->dev);
4047}
4048EXPORT_SYMBOL(hci_free_dev);
4049
1da177e4
LT
4050/* Register HCI device */
4051int hci_register_dev(struct hci_dev *hdev)
4052{
b1b813d4 4053 int id, error;
1da177e4 4054
74292d5a 4055 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
4056 return -EINVAL;
4057
08add513
MM
4058 /* Do not allow HCI_AMP devices to register at index 0,
4059 * so the index can be used as the AMP controller ID.
4060 */
3df92b31
SL
4061 switch (hdev->dev_type) {
4062 case HCI_BREDR:
4063 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4064 break;
4065 case HCI_AMP:
4066 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4067 break;
4068 default:
4069 return -EINVAL;
1da177e4 4070 }
8e87d142 4071
3df92b31
SL
4072 if (id < 0)
4073 return id;
4074
1da177e4
LT
4075 sprintf(hdev->name, "hci%d", id);
4076 hdev->id = id;
2d8b3a11
AE
4077
4078 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4079
d8537548
KC
4080 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4081 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
4082 if (!hdev->workqueue) {
4083 error = -ENOMEM;
4084 goto err;
4085 }
f48fd9c8 4086
d8537548
KC
4087 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4088 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
4089 if (!hdev->req_workqueue) {
4090 destroy_workqueue(hdev->workqueue);
4091 error = -ENOMEM;
4092 goto err;
4093 }
4094
0153e2ec
MH
4095 if (!IS_ERR_OR_NULL(bt_debugfs))
4096 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4097
bdc3e0f1
MH
4098 dev_set_name(&hdev->dev, "%s", hdev->name);
4099
4100 error = device_add(&hdev->dev);
33ca954d 4101 if (error < 0)
54506918 4102 goto err_wqueue;
1da177e4 4103
611b30f7 4104 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
4105 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4106 hdev);
611b30f7
MH
4107 if (hdev->rfkill) {
4108 if (rfkill_register(hdev->rfkill) < 0) {
4109 rfkill_destroy(hdev->rfkill);
4110 hdev->rfkill = NULL;
4111 }
4112 }
4113
5e130367
JH
4114 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4115 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4116
a8b2d5c2 4117 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 4118 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 4119
01cd3404 4120 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
4121 /* Assume BR/EDR support until proven otherwise (such as
4122 * through reading supported features during init.
4123 */
4124 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4125 }
ce2be9ac 4126
fcee3377
GP
4127 write_lock(&hci_dev_list_lock);
4128 list_add(&hdev->list, &hci_dev_list);
4129 write_unlock(&hci_dev_list_lock);
4130
4a964404
MH
4131 /* Devices that are marked for raw-only usage are unconfigured
4132 * and should not be included in normal operation.
fee746b0
MH
4133 */
4134 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4a964404 4135 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
fee746b0 4136
1da177e4 4137 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 4138 hci_dev_hold(hdev);
1da177e4 4139
19202573 4140 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 4141
1da177e4 4142 return id;
f48fd9c8 4143
33ca954d
DH
4144err_wqueue:
4145 destroy_workqueue(hdev->workqueue);
6ead1bbc 4146 destroy_workqueue(hdev->req_workqueue);
33ca954d 4147err:
3df92b31 4148 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 4149
33ca954d 4150 return error;
1da177e4
LT
4151}
4152EXPORT_SYMBOL(hci_register_dev);
4153
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away so concurrent users can bail out */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Cache the index now: hdev may be freed by hci_dev_put() below,
	 * but the IDA slot must be released after that.
	 */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled driver packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Tell mgmt the index is gone, unless the device never finished
	 * its initial setup/configuration.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	smp_unregister(hdev);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Drop all stored keys, addresses and connection parameters */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4221
4222/* Suspend HCI device */
4223int hci_suspend_dev(struct hci_dev *hdev)
4224{
4225 hci_notify(hdev, HCI_DEV_SUSPEND);
4226 return 0;
4227}
4228EXPORT_SYMBOL(hci_suspend_dev);
4229
4230/* Resume HCI device */
4231int hci_resume_dev(struct hci_dev *hdev)
4232{
4233 hci_notify(hdev, HCI_DEV_RESUME);
4234 return 0;
4235}
4236EXPORT_SYMBOL(hci_resume_dev);
4237
75e0569f
MH
4238/* Reset HCI device */
4239int hci_reset_dev(struct hci_dev *hdev)
4240{
4241 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4242 struct sk_buff *skb;
4243
4244 skb = bt_skb_alloc(3, GFP_ATOMIC);
4245 if (!skb)
4246 return -ENOMEM;
4247
4248 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4249 memcpy(skb_put(skb, 3), hw_err, 3);
4250
4251 /* Send Hardware Error to upper stack */
4252 return hci_recv_frame(hdev, skb);
4253}
4254EXPORT_SYMBOL(hci_reset_dev);
4255
76bca880 4256/* Receive frame from HCI drivers */
e1a26170 4257int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4258{
76bca880 4259 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4260 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4261 kfree_skb(skb);
4262 return -ENXIO;
4263 }
4264
d82603c6 4265 /* Incoming skb */
76bca880
MH
4266 bt_cb(skb)->incoming = 1;
4267
4268 /* Time stamp */
4269 __net_timestamp(skb);
4270
76bca880 4271 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4272 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4273
76bca880
MH
4274 return 0;
4275}
4276EXPORT_SYMBOL(hci_recv_frame);
4277
/* Incrementally reassemble one HCI packet of the given type from a raw
 * byte stream.
 *
 * @hdev:  device owning the per-index reassembly slots
 * @type:  HCI packet type (ACL, SCO or event)
 * @data:  input bytes from the driver
 * @count: number of input bytes available
 * @index: which hdev->reassembly[] slot to use
 *
 * Returns the number of unconsumed input bytes (>= 0), or a negative
 * errno (-EILSEQ for a bad type/index, -ENOMEM on allocation failure or
 * an oversized payload).  A completed packet is handed to
 * hci_recv_frame().
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Only ACL, SCO and event packets are reassembled here */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate the largest possible
		 * frame for this type and expect its header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still missing */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and verify it fits in the allocated skb.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
4385
99811510
SS
4386#define STREAM_REASSEMBLY 0
4387
4388int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4389{
4390 int type;
4391 int rem = 0;
4392
da5f6c37 4393 while (count) {
99811510
SS
4394 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4395
4396 if (!skb) {
4397 struct { char type; } *pkt;
4398
4399 /* Start of the frame */
4400 pkt = data;
4401 type = pkt->type;
4402
4403 data++;
4404 count--;
4405 } else
4406 type = bt_cb(skb)->pkt_type;
4407
1e429f38 4408 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4409 STREAM_REASSEMBLY);
99811510
SS
4410 if (rem < 0)
4411 return rem;
4412
4413 data += (count - rem);
4414 count = rem;
f81c6224 4415 }
99811510
SS
4416
4417 return rem;
4418}
4419EXPORT_SYMBOL(hci_recv_stream_fragment);
4420
1da177e4
LT
4421/* ---- Interface to upper protocols ---- */
4422
1da177e4
LT
4423int hci_register_cb(struct hci_cb *cb)
4424{
4425 BT_DBG("%p name %s", cb, cb->name);
4426
f20d09d5 4427 write_lock(&hci_cb_list_lock);
1da177e4 4428 list_add(&cb->list, &hci_cb_list);
f20d09d5 4429 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4430
4431 return 0;
4432}
4433EXPORT_SYMBOL(hci_register_cb);
4434
4435int hci_unregister_cb(struct hci_cb *cb)
4436{
4437 BT_DBG("%p name %s", cb, cb->name);
4438
f20d09d5 4439 write_lock(&hci_cb_list_lock);
1da177e4 4440 list_del(&cb->list);
f20d09d5 4441 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4442
4443 return 0;
4444}
4445EXPORT_SYMBOL(hci_unregister_cb);
4446
51086991 4447static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4448{
cdc52faa
MH
4449 int err;
4450
0d48d939 4451 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4452
cd82e61c
MH
4453 /* Time stamp */
4454 __net_timestamp(skb);
1da177e4 4455
cd82e61c
MH
4456 /* Send copy to monitor */
4457 hci_send_to_monitor(hdev, skb);
4458
4459 if (atomic_read(&hdev->promisc)) {
4460 /* Send copy to the sockets */
470fe1b5 4461 hci_send_to_sock(hdev, skb);
1da177e4
LT
4462 }
4463
4464 /* Get rid of skb owner, prior to sending to the driver. */
4465 skb_orphan(skb);
4466
cdc52faa
MH
4467 err = hdev->send(hdev, skb);
4468 if (err < 0) {
4469 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4470 kfree_skb(skb);
4471 }
1da177e4
LT
4472}
4473
3119ae95
JH
4474void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4475{
4476 skb_queue_head_init(&req->cmd_q);
4477 req->hdev = hdev;
5d73e034 4478 req->err = 0;
3119ae95
JH
4479}
4480
4481int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4482{
4483 struct hci_dev *hdev = req->hdev;
4484 struct sk_buff *skb;
4485 unsigned long flags;
4486
4487 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4488
49c922bb 4489 /* If an error occurred during request building, remove all HCI
5d73e034
AG
4490 * commands queued on the HCI request queue.
4491 */
4492 if (req->err) {
4493 skb_queue_purge(&req->cmd_q);
4494 return req->err;
4495 }
4496
3119ae95
JH
4497 /* Do not allow empty requests */
4498 if (skb_queue_empty(&req->cmd_q))
382b0c39 4499 return -ENODATA;
3119ae95
JH
4500
4501 skb = skb_peek_tail(&req->cmd_q);
4502 bt_cb(skb)->req.complete = complete;
4503
4504 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4505 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4506 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4507
4508 queue_work(hdev->workqueue, &hdev->cmd_work);
4509
4510 return 0;
4511}
4512
899de765
MH
4513bool hci_req_pending(struct hci_dev *hdev)
4514{
4515 return (hdev->req_status == HCI_REQ_PEND);
4516}
4517
1ca3a9d0 4518static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4519 u32 plen, const void *param)
1da177e4
LT
4520{
4521 int len = HCI_COMMAND_HDR_SIZE + plen;
4522 struct hci_command_hdr *hdr;
4523 struct sk_buff *skb;
4524
1da177e4 4525 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4526 if (!skb)
4527 return NULL;
1da177e4
LT
4528
4529 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4530 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4531 hdr->plen = plen;
4532
4533 if (plen)
4534 memcpy(skb_put(skb, plen), param, plen);
4535
4536 BT_DBG("skb len %d", skb->len);
4537
0d48d939 4538 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
43e73e4e 4539 bt_cb(skb)->opcode = opcode;
c78ae283 4540
1ca3a9d0
JH
4541 return skb;
4542}
4543
4544/* Send HCI command */
07dc93dd
JH
4545int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4546 const void *param)
1ca3a9d0
JH
4547{
4548 struct sk_buff *skb;
4549
4550 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4551
4552 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4553 if (!skb) {
4554 BT_ERR("%s no memory for command", hdev->name);
4555 return -ENOMEM;
4556 }
4557
49c922bb 4558 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
4559 * single-command requests.
4560 */
4561 bt_cb(skb)->req.start = true;
4562
1da177e4 4563 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4564 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4565
4566 return 0;
4567}
1da177e4 4568
71c76a17 4569/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4570void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4571 const void *param, u8 event)
71c76a17
JH
4572{
4573 struct hci_dev *hdev = req->hdev;
4574 struct sk_buff *skb;
4575
4576 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4577
49c922bb 4578 /* If an error occurred during request building, there is no point in
34739c1e
AG
4579 * queueing the HCI command. We can simply return.
4580 */
4581 if (req->err)
4582 return;
4583
71c76a17
JH
4584 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4585 if (!skb) {
5d73e034
AG
4586 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4587 hdev->name, opcode);
4588 req->err = -ENOMEM;
e348fe6b 4589 return;
71c76a17
JH
4590 }
4591
4592 if (skb_queue_empty(&req->cmd_q))
4593 bt_cb(skb)->req.start = true;
4594
02350a72
JH
4595 bt_cb(skb)->req.event = event;
4596
71c76a17 4597 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4598}
4599
07dc93dd
JH
4600void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4601 const void *param)
02350a72
JH
4602{
4603 hci_req_add_ev(req, opcode, plen, param, 0);
4604}
4605
1da177e4 4606/* Get data from the previously sent command */
a9de9248 4607void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4608{
4609 struct hci_command_hdr *hdr;
4610
4611 if (!hdev->sent_cmd)
4612 return NULL;
4613
4614 hdr = (void *) hdev->sent_cmd->data;
4615
a9de9248 4616 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4617 return NULL;
4618
f0e09510 4619 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4620
4621 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4622}
4623
4624/* Send ACL data */
4625static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4626{
4627 struct hci_acl_hdr *hdr;
4628 int len = skb->len;
4629
badff6d0
ACM
4630 skb_push(skb, HCI_ACL_HDR_SIZE);
4631 skb_reset_transport_header(skb);
9c70220b 4632 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4633 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4634 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4635}
4636
/* Add ACL headers to an outgoing (possibly fragmented) skb and queue it
 * on the given channel queue.  For BR/EDR the connection handle is used,
 * for AMP the channel's own logical link handle.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments hang off
	 * frag_list and are queued individually below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT instead of ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
4698
4699void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4700{
ee22be7e 4701 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4702
f0e09510 4703 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4704
ee22be7e 4705 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4706
3eff45ea 4707 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4708}
1da177e4
LT
4709
4710/* Send SCO data */
0d861d8b 4711void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4712{
4713 struct hci_dev *hdev = conn->hdev;
4714 struct hci_sco_hdr hdr;
4715
4716 BT_DBG("%s len %d", hdev->name, skb->len);
4717
aca3192c 4718 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4719 hdr.dlen = skb->len;
4720
badff6d0
ACM
4721 skb_push(skb, HCI_SCO_HDR_SIZE);
4722 skb_reset_transport_header(skb);
9c70220b 4723 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4724
0d48d939 4725 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4726
1da177e4 4727 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4728 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4729}
1da177e4
LT
4730
4731/* ---- HCI TX task (outgoing data) ---- */
4732
/* HCI Connection scheduler */
/* Pick the connection of the given link type with the fewest packets in
 * flight (simple fairness) and compute its send quota from the
 * controller's free buffer count.  *quote is 0 when nothing is ready.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE shares ACL buffers when it has no dedicated ones */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the free buffers among ready connections, at least 1 */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4793
6039aa73 4794static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4795{
4796 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4797 struct hci_conn *c;
1da177e4 4798
bae1f5d9 4799 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4800
bf4c6325
GP
4801 rcu_read_lock();
4802
1da177e4 4803 /* Kill stalled connections */
bf4c6325 4804 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4805 if (c->type == type && c->sent) {
6ed93dc6
AE
4806 BT_ERR("%s killing stalled connection %pMR",
4807 hdev->name, &c->dst);
bed71748 4808 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4809 }
4810 }
bf4c6325
GP
4811
4812 rcu_read_unlock();
1da177e4
LT
4813}
4814
/* Pick the next channel to service for the given link type: among the
 * channels whose head skb has the highest priority, choose the one on
 * the connection with the fewest packets in flight, and compute a send
 * quota from the controller's free buffer count.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts the fairness
			 * bookkeeping at that new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Within a priority level, prefer the least-loaded
			 * connection.
			 */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* LE shares ACL buffers when it has no dedicated ones */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split the free buffers among contenders, at least 1 */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4896
/* Anti-starvation pass run after a scheduling round: channels that got
 * no service this round (chan->sent == 0) have their queued head skb
 * promoted to just below the maximum priority; channels that were
 * serviced have their per-round counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Serviced this round: reset counter, no promotion */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4946
b71d385a
AE
4947static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4948{
4949 /* Calculate count of blocks used by this packet */
4950 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4951}
4952
6039aa73 4953static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4954{
4a964404 4955 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1da177e4
LT
4956 /* ACL tx timeout must be longer than maximum
4957 * link supervision timeout (40.9 seconds) */
63d2bc1b 4958 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4959 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4960 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4961 }
63d2bc1b 4962}
1da177e4 4963
/* Packet-based ACL scheduler: drain channels in quota-sized bursts
 * while the controller has free ACL buffers, then run the priority
 * anti-starvation pass if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance priorities for starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
5001
/* Block-based ACL scheduler (data block flow control, used for AMP
 * controllers): quotas and the controller budget are counted in buffer
 * blocks rather than packets.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Give up entirely when the packet does not fit in
			 * the remaining block budget.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
5055
6039aa73 5056static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
5057{
5058 BT_DBG("%s", hdev->name);
5059
bd1eb66b
AE
5060 /* No ACL link over BR/EDR controller */
5061 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5062 return;
5063
5064 /* No AMP link over AMP controller */
5065 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
5066 return;
5067
5068 switch (hdev->flow_ctl_mode) {
5069 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5070 hci_sched_acl_pkt(hdev);
5071 break;
5072
5073 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5074 hci_sched_acl_blk(hdev);
5075 break;
5076 }
5077}
5078
1da177e4 5079/* Schedule SCO */
6039aa73 5080static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
5081{
5082 struct hci_conn *conn;
5083 struct sk_buff *skb;
5084 int quote;
5085
5086 BT_DBG("%s", hdev->name);
5087
52087a79
LAD
5088 if (!hci_conn_num(hdev, SCO_LINK))
5089 return;
5090
1da177e4
LT
5091 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5092 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5093 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5094 hci_send_frame(hdev, skb);
1da177e4
LT
5095
5096 conn->sent++;
5097 if (conn->sent == ~0)
5098 conn->sent = 0;
5099 }
5100 }
5101}
5102
6039aa73 5103static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
5104{
5105 struct hci_conn *conn;
5106 struct sk_buff *skb;
5107 int quote;
5108
5109 BT_DBG("%s", hdev->name);
5110
52087a79
LAD
5111 if (!hci_conn_num(hdev, ESCO_LINK))
5112 return;
5113
8fc9ced3
GP
5114 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5115 &quote))) {
b6a0dc82
MH
5116 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5117 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5118 hci_send_frame(hdev, skb);
b6a0dc82
MH
5119
5120 conn->sent++;
5121 if (conn->sent == ~0)
5122 conn->sent = 0;
5123 }
5124 }
5125}
5126
/* LE scheduler: drains LE channels using either the dedicated LE buffer
 * pool or, when the controller has none, the shared ACL pool.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the LE pool when it exists, otherwise borrow ACL buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
5177
3eff45ea 5178static void hci_tx_work(struct work_struct *work)
1da177e4 5179{
3eff45ea 5180 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
5181 struct sk_buff *skb;
5182
6ed58ec5 5183 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 5184 hdev->sco_cnt, hdev->le_cnt);
1da177e4 5185
52de599e
MH
5186 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5187 /* Schedule queues and send stuff to HCI driver */
5188 hci_sched_acl(hdev);
5189 hci_sched_sco(hdev);
5190 hci_sched_esco(hdev);
5191 hci_sched_le(hdev);
5192 }
6ed58ec5 5193
1da177e4
LT
5194 /* Send next queued raw (unknown type) packet */
5195 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 5196 hci_send_frame(hdev, skb);
1da177e4
LT
5197}
5198
25985edc 5199/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5200
5201/* ACL data packet */
6039aa73 5202static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5203{
5204 struct hci_acl_hdr *hdr = (void *) skb->data;
5205 struct hci_conn *conn;
5206 __u16 handle, flags;
5207
5208 skb_pull(skb, HCI_ACL_HDR_SIZE);
5209
5210 handle = __le16_to_cpu(hdr->handle);
5211 flags = hci_flags(handle);
5212 handle = hci_handle(handle);
5213
f0e09510 5214 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5215 handle, flags);
1da177e4
LT
5216
5217 hdev->stat.acl_rx++;
5218
5219 hci_dev_lock(hdev);
5220 conn = hci_conn_hash_lookup_handle(hdev, handle);
5221 hci_dev_unlock(hdev);
8e87d142 5222
1da177e4 5223 if (conn) {
65983fc7 5224 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5225
1da177e4 5226 /* Send to upper protocol */
686ebf28
UF
5227 l2cap_recv_acldata(conn, skb, flags);
5228 return;
1da177e4 5229 } else {
8e87d142 5230 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5231 hdev->name, handle);
1da177e4
LT
5232 }
5233
5234 kfree_skb(skb);
5235}
5236
5237/* SCO data packet */
6039aa73 5238static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5239{
5240 struct hci_sco_hdr *hdr = (void *) skb->data;
5241 struct hci_conn *conn;
5242 __u16 handle;
5243
5244 skb_pull(skb, HCI_SCO_HDR_SIZE);
5245
5246 handle = __le16_to_cpu(hdr->handle);
5247
f0e09510 5248 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5249
5250 hdev->stat.sco_rx++;
5251
5252 hci_dev_lock(hdev);
5253 conn = hci_conn_hash_lookup_handle(hdev, handle);
5254 hci_dev_unlock(hdev);
5255
5256 if (conn) {
1da177e4 5257 /* Send to upper protocol */
686ebf28
UF
5258 sco_recv_scodata(conn, skb);
5259 return;
1da177e4 5260 } else {
8e87d142 5261 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5262 hdev->name, handle);
1da177e4
LT
5263 }
5264
5265 kfree_skb(skb);
5266}
5267
9238f36a
JH
5268static bool hci_req_is_complete(struct hci_dev *hdev)
5269{
5270 struct sk_buff *skb;
5271
5272 skb = skb_peek(&hdev->cmd_q);
5273 if (!skb)
5274 return true;
5275
5276 return bt_cb(skb)->req.start;
5277}
5278
42c6b129
JH
5279static void hci_resend_last(struct hci_dev *hdev)
5280{
5281 struct hci_command_hdr *sent;
5282 struct sk_buff *skb;
5283 u16 opcode;
5284
5285 if (!hdev->sent_cmd)
5286 return;
5287
5288 sent = (void *) hdev->sent_cmd->data;
5289 opcode = __le16_to_cpu(sent->opcode);
5290 if (opcode == HCI_OP_RESET)
5291 return;
5292
5293 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5294 if (!skb)
5295 return;
5296
5297 skb_queue_head(&hdev->cmd_q, skb);
5298 queue_work(hdev->workqueue, &hdev->cmd_work);
5299}
5300
9238f36a
JH
/* Handle completion of the HCI command identified by @opcode. When the
 * command was the last one of a request (or failed with non-zero
 * @status), run the request's completion callback and flush any
 * remaining commands that belonged to the same request.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request.
	 * The queue lock is taken IRQ-safe since cmd_q is also
	 * touched from other contexts; dequeue until the start of
	 * the next request, which is pushed back onto the queue.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Keep the last seen callback; it belongs to the
		 * request being flushed.
		 */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5366
b78752cc 5367static void hci_rx_work(struct work_struct *work)
1da177e4 5368{
b78752cc 5369 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5370 struct sk_buff *skb;
5371
5372 BT_DBG("%s", hdev->name);
5373
1da177e4 5374 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5375 /* Send copy to monitor */
5376 hci_send_to_monitor(hdev, skb);
5377
1da177e4
LT
5378 if (atomic_read(&hdev->promisc)) {
5379 /* Send copy to the sockets */
470fe1b5 5380 hci_send_to_sock(hdev, skb);
1da177e4
LT
5381 }
5382
fee746b0 5383 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5384 kfree_skb(skb);
5385 continue;
5386 }
5387
5388 if (test_bit(HCI_INIT, &hdev->flags)) {
5389 /* Don't process data packets in this states. */
0d48d939 5390 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5391 case HCI_ACLDATA_PKT:
5392 case HCI_SCODATA_PKT:
5393 kfree_skb(skb);
5394 continue;
3ff50b79 5395 }
1da177e4
LT
5396 }
5397
5398 /* Process frame */
0d48d939 5399 switch (bt_cb(skb)->pkt_type) {
1da177e4 5400 case HCI_EVENT_PKT:
b78752cc 5401 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5402 hci_event_packet(hdev, skb);
5403 break;
5404
5405 case HCI_ACLDATA_PKT:
5406 BT_DBG("%s ACL data packet", hdev->name);
5407 hci_acldata_packet(hdev, skb);
5408 break;
5409
5410 case HCI_SCODATA_PKT:
5411 BT_DBG("%s SCO data packet", hdev->name);
5412 hci_scodata_packet(hdev, skb);
5413 break;
5414
5415 default:
5416 kfree_skb(skb);
5417 break;
5418 }
5419 }
1da177e4
LT
5420}
5421
c347b765 5422static void hci_cmd_work(struct work_struct *work)
1da177e4 5423{
c347b765 5424 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5425 struct sk_buff *skb;
5426
2104786b
AE
5427 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5428 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5429
1da177e4 5430 /* Send queued commands */
5a08ecce
AE
5431 if (atomic_read(&hdev->cmd_cnt)) {
5432 skb = skb_dequeue(&hdev->cmd_q);
5433 if (!skb)
5434 return;
5435
7585b97a 5436 kfree_skb(hdev->sent_cmd);
1da177e4 5437
a675d7f1 5438 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5439 if (hdev->sent_cmd) {
1da177e4 5440 atomic_dec(&hdev->cmd_cnt);
57d17d70 5441 hci_send_frame(hdev, skb);
7bdb8a5c 5442 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5443 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5444 else
65cc2b49
MH
5445 schedule_delayed_work(&hdev->cmd_timer,
5446 HCI_CMD_TIMEOUT);
1da177e4
LT
5447 } else {
5448 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5449 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5450 }
5451 }
5452}
b1efcc28
AG
5453
5454void hci_req_add_le_scan_disable(struct hci_request *req)
5455{
5456 struct hci_cp_le_set_scan_enable cp;
5457
5458 memset(&cp, 0, sizeof(cp));
5459 cp.enable = LE_SCAN_DISABLE;
5460 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5461}
a4790dbd 5462
8540f6c0
MH
5463static void add_to_white_list(struct hci_request *req,
5464 struct hci_conn_params *params)
5465{
5466 struct hci_cp_le_add_to_white_list cp;
5467
5468 cp.bdaddr_type = params->addr_type;
5469 bacpy(&cp.bdaddr, &params->addr);
5470
5471 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5472}
5473
5474static u8 update_white_list(struct hci_request *req)
5475{
5476 struct hci_dev *hdev = req->hdev;
5477 struct hci_conn_params *params;
5478 struct bdaddr_list *b;
5479 uint8_t white_list_entries = 0;
5480
5481 /* Go through the current white list programmed into the
5482 * controller one by one and check if that address is still
5483 * in the list of pending connections or list of devices to
5484 * report. If not present in either list, then queue the
5485 * command to remove it from the controller.
5486 */
5487 list_for_each_entry(b, &hdev->le_white_list, list) {
5488 struct hci_cp_le_del_from_white_list cp;
5489
5490 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5491 &b->bdaddr, b->bdaddr_type) ||
5492 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5493 &b->bdaddr, b->bdaddr_type)) {
5494 white_list_entries++;
5495 continue;
5496 }
5497
5498 cp.bdaddr_type = b->bdaddr_type;
5499 bacpy(&cp.bdaddr, &b->bdaddr);
5500
5501 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5502 sizeof(cp), &cp);
5503 }
5504
5505 /* Since all no longer valid white list entries have been
5506 * removed, walk through the list of pending connections
5507 * and ensure that any new device gets programmed into
5508 * the controller.
5509 *
5510 * If the list of the devices is larger than the list of
5511 * available white list entries in the controller, then
5512 * just abort and return filer policy value to not use the
5513 * white list.
5514 */
5515 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5516 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5517 &params->addr, params->addr_type))
5518 continue;
5519
5520 if (white_list_entries >= hdev->le_white_list_size) {
5521 /* Select filter policy to accept all advertising */
5522 return 0x00;
5523 }
5524
66d8e837
MH
5525 if (hci_find_irk_by_addr(hdev, &params->addr,
5526 params->addr_type)) {
5527 /* White list can not be used with RPAs */
5528 return 0x00;
5529 }
5530
8540f6c0
MH
5531 white_list_entries++;
5532 add_to_white_list(req, params);
5533 }
5534
5535 /* After adding all new pending connections, walk through
5536 * the list of pending reports and also add these to the
5537 * white list if there is still space.
5538 */
5539 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5540 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5541 &params->addr, params->addr_type))
5542 continue;
5543
5544 if (white_list_entries >= hdev->le_white_list_size) {
5545 /* Select filter policy to accept all advertising */
5546 return 0x00;
5547 }
5548
66d8e837
MH
5549 if (hci_find_irk_by_addr(hdev, &params->addr,
5550 params->addr_type)) {
5551 /* White list can not be used with RPAs */
5552 return 0x00;
5553 }
5554
8540f6c0
MH
5555 white_list_entries++;
5556 add_to_white_list(req, params);
5557 }
5558
5559 /* Select filter policy to use white list */
5560 return 0x01;
5561}
5562
8ef30fd3
AG
5563void hci_req_add_le_passive_scan(struct hci_request *req)
5564{
5565 struct hci_cp_le_set_scan_param param_cp;
5566 struct hci_cp_le_set_scan_enable enable_cp;
5567 struct hci_dev *hdev = req->hdev;
5568 u8 own_addr_type;
8540f6c0 5569 u8 filter_policy;
8ef30fd3 5570
6ab535a7
MH
5571 /* Set require_privacy to false since no SCAN_REQ are send
5572 * during passive scanning. Not using an unresolvable address
5573 * here is important so that peer devices using direct
5574 * advertising with our address will be correctly reported
5575 * by the controller.
8ef30fd3 5576 */
6ab535a7 5577 if (hci_update_random_address(req, false, &own_addr_type))
8ef30fd3
AG
5578 return;
5579
8540f6c0
MH
5580 /* Adding or removing entries from the white list must
5581 * happen before enabling scanning. The controller does
5582 * not allow white list modification while scanning.
5583 */
5584 filter_policy = update_white_list(req);
5585
8ef30fd3
AG
5586 memset(&param_cp, 0, sizeof(param_cp));
5587 param_cp.type = LE_SCAN_PASSIVE;
5588 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5589 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5590 param_cp.own_address_type = own_addr_type;
8540f6c0 5591 param_cp.filter_policy = filter_policy;
8ef30fd3
AG
5592 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5593 &param_cp);
5594
5595 memset(&enable_cp, 0, sizeof(enable_cp));
5596 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5597 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5598 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5599 &enable_cp);
5600}
5601
a4790dbd
AG
5602static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5603{
5604 if (status)
5605 BT_DBG("HCI request failed to update background scanning: "
5606 "status 0x%2.2x", status);
5607}
5608
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections or devices to report, we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	/* Skip while the device is down, still initializing or
	 * configuring, auto-off managed, or being unregistered.
	 */
	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there is no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}
432df05e 5681
22f433dc
JH
5682static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5683{
5684 struct bdaddr_list *b;
5685
5686 list_for_each_entry(b, &hdev->whitelist, list) {
5687 struct hci_conn *conn;
5688
5689 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5690 if (!conn)
5691 return true;
5692
5693 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5694 return true;
5695 }
5696
5697 return false;
5698}
5699
432df05e
JH
5700void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5701{
5702 u8 scan;
5703
5704 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5705 return;
5706
5707 if (!hdev_is_powered(hdev))
5708 return;
5709
5710 if (mgmt_powering_down(hdev))
5711 return;
5712
5713 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
22f433dc 5714 disconnected_whitelist_entries(hdev))
432df05e
JH
5715 scan = SCAN_PAGE;
5716 else
5717 scan = SCAN_DISABLED;
5718
5719 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5720 return;
5721
5722 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5723 scan |= SCAN_INQUIRY;
5724
5725 if (req)
5726 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5727 else
5728 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5729}