Bluetooth: Update hci_add_link_key() to return pointer to key
[linux-block.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
99780a7b 32#include <linux/crypto.h>
47219839 33#include <asm/unaligned.h>
1da177e4
LT
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
4bc58f51 37#include <net/bluetooth/l2cap.h>
1da177e4 38
970c4e46
JH
39#include "smp.h"
40
b78752cc 41static void hci_rx_work(struct work_struct *work);
c347b765 42static void hci_cmd_work(struct work_struct *work);
3eff45ea 43static void hci_tx_work(struct work_struct *work);
1da177e4 44
1da177e4
LT
45/* HCI device list */
46LIST_HEAD(hci_dev_list);
47DEFINE_RWLOCK(hci_dev_list_lock);
48
49/* HCI callback list */
50LIST_HEAD(hci_cb_list);
51DEFINE_RWLOCK(hci_cb_list_lock);
52
3df92b31
SL
53/* HCI ID Numbering */
54static DEFINE_IDA(hci_index_ida);
55
1da177e4
LT
56/* ---- HCI notifications ---- */
57
/* Fan a device event (register, unregister, up, down, ...) out to the
 * listening HCI sockets.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
62
baf27f6e
MH
63/* ---- HCI debugfs entries ---- */
64
4b4148e9
MH
65static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
66 size_t count, loff_t *ppos)
67{
68 struct hci_dev *hdev = file->private_data;
69 char buf[3];
70
111902f7 71 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
4b4148e9
MH
72 buf[1] = '\n';
73 buf[2] = '\0';
74 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
75}
76
77static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
78 size_t count, loff_t *ppos)
79{
80 struct hci_dev *hdev = file->private_data;
81 struct sk_buff *skb;
82 char buf[32];
83 size_t buf_size = min(count, (sizeof(buf)-1));
84 bool enable;
85 int err;
86
87 if (!test_bit(HCI_UP, &hdev->flags))
88 return -ENETDOWN;
89
90 if (copy_from_user(buf, user_buf, buf_size))
91 return -EFAULT;
92
93 buf[buf_size] = '\0';
94 if (strtobool(buf, &enable))
95 return -EINVAL;
96
111902f7 97 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
4b4148e9
MH
98 return -EALREADY;
99
100 hci_req_lock(hdev);
101 if (enable)
102 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
103 HCI_CMD_TIMEOUT);
104 else
105 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
106 HCI_CMD_TIMEOUT);
107 hci_req_unlock(hdev);
108
109 if (IS_ERR(skb))
110 return PTR_ERR(skb);
111
112 err = -bt_to_errno(skb->data[0]);
113 kfree_skb(skb);
114
115 if (err < 0)
116 return err;
117
111902f7 118 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
4b4148e9
MH
119
120 return count;
121}
122
123static const struct file_operations dut_mode_fops = {
124 .open = simple_open,
125 .read = dut_mode_read,
126 .write = dut_mode_write,
127 .llseek = default_llseek,
128};
129
dfb826a8
MH
130static int features_show(struct seq_file *f, void *ptr)
131{
132 struct hci_dev *hdev = f->private;
133 u8 p;
134
135 hci_dev_lock(hdev);
136 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
cfbb2b5b 137 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
dfb826a8
MH
138 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
139 hdev->features[p][0], hdev->features[p][1],
140 hdev->features[p][2], hdev->features[p][3],
141 hdev->features[p][4], hdev->features[p][5],
142 hdev->features[p][6], hdev->features[p][7]);
143 }
cfbb2b5b
MH
144 if (lmp_le_capable(hdev))
145 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
146 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
147 hdev->le_features[0], hdev->le_features[1],
148 hdev->le_features[2], hdev->le_features[3],
149 hdev->le_features[4], hdev->le_features[5],
150 hdev->le_features[6], hdev->le_features[7]);
dfb826a8
MH
151 hci_dev_unlock(hdev);
152
153 return 0;
154}
155
156static int features_open(struct inode *inode, struct file *file)
157{
158 return single_open(file, features_show, inode->i_private);
159}
160
161static const struct file_operations features_fops = {
162 .open = features_open,
163 .read = seq_read,
164 .llseek = seq_lseek,
165 .release = single_release,
166};
167
70afe0b8
MH
168static int blacklist_show(struct seq_file *f, void *p)
169{
170 struct hci_dev *hdev = f->private;
171 struct bdaddr_list *b;
172
173 hci_dev_lock(hdev);
174 list_for_each_entry(b, &hdev->blacklist, list)
b25f0785 175 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
70afe0b8
MH
176 hci_dev_unlock(hdev);
177
178 return 0;
179}
180
181static int blacklist_open(struct inode *inode, struct file *file)
182{
183 return single_open(file, blacklist_show, inode->i_private);
184}
185
186static const struct file_operations blacklist_fops = {
187 .open = blacklist_open,
188 .read = seq_read,
189 .llseek = seq_lseek,
190 .release = single_release,
191};
192
47219839
MH
193static int uuids_show(struct seq_file *f, void *p)
194{
195 struct hci_dev *hdev = f->private;
196 struct bt_uuid *uuid;
197
198 hci_dev_lock(hdev);
199 list_for_each_entry(uuid, &hdev->uuids, list) {
58f01aa9
MH
200 u8 i, val[16];
201
202 /* The Bluetooth UUID values are stored in big endian,
203 * but with reversed byte order. So convert them into
204 * the right order for the %pUb modifier.
205 */
206 for (i = 0; i < 16; i++)
207 val[i] = uuid->uuid[15 - i];
208
209 seq_printf(f, "%pUb\n", val);
47219839
MH
210 }
211 hci_dev_unlock(hdev);
212
213 return 0;
214}
215
216static int uuids_open(struct inode *inode, struct file *file)
217{
218 return single_open(file, uuids_show, inode->i_private);
219}
220
221static const struct file_operations uuids_fops = {
222 .open = uuids_open,
223 .read = seq_read,
224 .llseek = seq_lseek,
225 .release = single_release,
226};
227
baf27f6e
MH
228static int inquiry_cache_show(struct seq_file *f, void *p)
229{
230 struct hci_dev *hdev = f->private;
231 struct discovery_state *cache = &hdev->discovery;
232 struct inquiry_entry *e;
233
234 hci_dev_lock(hdev);
235
236 list_for_each_entry(e, &cache->all, all) {
237 struct inquiry_data *data = &e->data;
238 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
239 &data->bdaddr,
240 data->pscan_rep_mode, data->pscan_period_mode,
241 data->pscan_mode, data->dev_class[2],
242 data->dev_class[1], data->dev_class[0],
243 __le16_to_cpu(data->clock_offset),
244 data->rssi, data->ssp_mode, e->timestamp);
245 }
246
247 hci_dev_unlock(hdev);
248
249 return 0;
250}
251
252static int inquiry_cache_open(struct inode *inode, struct file *file)
253{
254 return single_open(file, inquiry_cache_show, inode->i_private);
255}
256
257static const struct file_operations inquiry_cache_fops = {
258 .open = inquiry_cache_open,
259 .read = seq_read,
260 .llseek = seq_lseek,
261 .release = single_release,
262};
263
02d08d15
MH
264static int link_keys_show(struct seq_file *f, void *ptr)
265{
266 struct hci_dev *hdev = f->private;
267 struct list_head *p, *n;
268
269 hci_dev_lock(hdev);
270 list_for_each_safe(p, n, &hdev->link_keys) {
271 struct link_key *key = list_entry(p, struct link_key, list);
272 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
273 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
274 }
275 hci_dev_unlock(hdev);
276
277 return 0;
278}
279
280static int link_keys_open(struct inode *inode, struct file *file)
281{
282 return single_open(file, link_keys_show, inode->i_private);
283}
284
285static const struct file_operations link_keys_fops = {
286 .open = link_keys_open,
287 .read = seq_read,
288 .llseek = seq_lseek,
289 .release = single_release,
290};
291
babdbb3c
MH
292static int dev_class_show(struct seq_file *f, void *ptr)
293{
294 struct hci_dev *hdev = f->private;
295
296 hci_dev_lock(hdev);
297 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
298 hdev->dev_class[1], hdev->dev_class[0]);
299 hci_dev_unlock(hdev);
300
301 return 0;
302}
303
304static int dev_class_open(struct inode *inode, struct file *file)
305{
306 return single_open(file, dev_class_show, inode->i_private);
307}
308
309static const struct file_operations dev_class_fops = {
310 .open = dev_class_open,
311 .read = seq_read,
312 .llseek = seq_lseek,
313 .release = single_release,
314};
315
041000b9
MH
316static int voice_setting_get(void *data, u64 *val)
317{
318 struct hci_dev *hdev = data;
319
320 hci_dev_lock(hdev);
321 *val = hdev->voice_setting;
322 hci_dev_unlock(hdev);
323
324 return 0;
325}
326
327DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
328 NULL, "0x%4.4llx\n");
329
ebd1e33b
MH
330static int auto_accept_delay_set(void *data, u64 val)
331{
332 struct hci_dev *hdev = data;
333
334 hci_dev_lock(hdev);
335 hdev->auto_accept_delay = val;
336 hci_dev_unlock(hdev);
337
338 return 0;
339}
340
341static int auto_accept_delay_get(void *data, u64 *val)
342{
343 struct hci_dev *hdev = data;
344
345 hci_dev_lock(hdev);
346 *val = hdev->auto_accept_delay;
347 hci_dev_unlock(hdev);
348
349 return 0;
350}
351
352DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
353 auto_accept_delay_set, "%llu\n");
354
06f5b778
MH
355static int ssp_debug_mode_set(void *data, u64 val)
356{
357 struct hci_dev *hdev = data;
358 struct sk_buff *skb;
359 __u8 mode;
360 int err;
361
362 if (val != 0 && val != 1)
363 return -EINVAL;
364
365 if (!test_bit(HCI_UP, &hdev->flags))
366 return -ENETDOWN;
367
368 hci_req_lock(hdev);
369 mode = val;
370 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
371 &mode, HCI_CMD_TIMEOUT);
372 hci_req_unlock(hdev);
373
374 if (IS_ERR(skb))
375 return PTR_ERR(skb);
376
377 err = -bt_to_errno(skb->data[0]);
378 kfree_skb(skb);
379
380 if (err < 0)
381 return err;
382
383 hci_dev_lock(hdev);
384 hdev->ssp_debug_mode = val;
385 hci_dev_unlock(hdev);
386
387 return 0;
388}
389
390static int ssp_debug_mode_get(void *data, u64 *val)
391{
392 struct hci_dev *hdev = data;
393
394 hci_dev_lock(hdev);
395 *val = hdev->ssp_debug_mode;
396 hci_dev_unlock(hdev);
397
398 return 0;
399}
400
401DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
402 ssp_debug_mode_set, "%llu\n");
403
5afeac14
MH
404static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
405 size_t count, loff_t *ppos)
406{
407 struct hci_dev *hdev = file->private_data;
408 char buf[3];
409
111902f7 410 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
5afeac14
MH
411 buf[1] = '\n';
412 buf[2] = '\0';
413 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
414}
415
416static ssize_t force_sc_support_write(struct file *file,
417 const char __user *user_buf,
418 size_t count, loff_t *ppos)
419{
420 struct hci_dev *hdev = file->private_data;
421 char buf[32];
422 size_t buf_size = min(count, (sizeof(buf)-1));
423 bool enable;
424
425 if (test_bit(HCI_UP, &hdev->flags))
426 return -EBUSY;
427
428 if (copy_from_user(buf, user_buf, buf_size))
429 return -EFAULT;
430
431 buf[buf_size] = '\0';
432 if (strtobool(buf, &enable))
433 return -EINVAL;
434
111902f7 435 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
5afeac14
MH
436 return -EALREADY;
437
111902f7 438 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
5afeac14
MH
439
440 return count;
441}
442
443static const struct file_operations force_sc_support_fops = {
444 .open = simple_open,
445 .read = force_sc_support_read,
446 .write = force_sc_support_write,
447 .llseek = default_llseek,
448};
449
134c2a89
MH
450static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
451 size_t count, loff_t *ppos)
452{
453 struct hci_dev *hdev = file->private_data;
454 char buf[3];
455
456 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
457 buf[1] = '\n';
458 buf[2] = '\0';
459 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
460}
461
462static const struct file_operations sc_only_mode_fops = {
463 .open = simple_open,
464 .read = sc_only_mode_read,
465 .llseek = default_llseek,
466};
467
2bfa3531
MH
468static int idle_timeout_set(void *data, u64 val)
469{
470 struct hci_dev *hdev = data;
471
472 if (val != 0 && (val < 500 || val > 3600000))
473 return -EINVAL;
474
475 hci_dev_lock(hdev);
2be48b65 476 hdev->idle_timeout = val;
2bfa3531
MH
477 hci_dev_unlock(hdev);
478
479 return 0;
480}
481
482static int idle_timeout_get(void *data, u64 *val)
483{
484 struct hci_dev *hdev = data;
485
486 hci_dev_lock(hdev);
487 *val = hdev->idle_timeout;
488 hci_dev_unlock(hdev);
489
490 return 0;
491}
492
493DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
494 idle_timeout_set, "%llu\n");
495
c982b2ea
JH
496static int rpa_timeout_set(void *data, u64 val)
497{
498 struct hci_dev *hdev = data;
499
500 /* Require the RPA timeout to be at least 30 seconds and at most
501 * 24 hours.
502 */
503 if (val < 30 || val > (60 * 60 * 24))
504 return -EINVAL;
505
506 hci_dev_lock(hdev);
507 hdev->rpa_timeout = val;
508 hci_dev_unlock(hdev);
509
510 return 0;
511}
512
513static int rpa_timeout_get(void *data, u64 *val)
514{
515 struct hci_dev *hdev = data;
516
517 hci_dev_lock(hdev);
518 *val = hdev->rpa_timeout;
519 hci_dev_unlock(hdev);
520
521 return 0;
522}
523
524DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
525 rpa_timeout_set, "%llu\n");
526
2bfa3531
MH
527static int sniff_min_interval_set(void *data, u64 val)
528{
529 struct hci_dev *hdev = data;
530
531 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
532 return -EINVAL;
533
534 hci_dev_lock(hdev);
2be48b65 535 hdev->sniff_min_interval = val;
2bfa3531
MH
536 hci_dev_unlock(hdev);
537
538 return 0;
539}
540
541static int sniff_min_interval_get(void *data, u64 *val)
542{
543 struct hci_dev *hdev = data;
544
545 hci_dev_lock(hdev);
546 *val = hdev->sniff_min_interval;
547 hci_dev_unlock(hdev);
548
549 return 0;
550}
551
552DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
553 sniff_min_interval_set, "%llu\n");
554
555static int sniff_max_interval_set(void *data, u64 val)
556{
557 struct hci_dev *hdev = data;
558
559 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
560 return -EINVAL;
561
562 hci_dev_lock(hdev);
2be48b65 563 hdev->sniff_max_interval = val;
2bfa3531
MH
564 hci_dev_unlock(hdev);
565
566 return 0;
567}
568
569static int sniff_max_interval_get(void *data, u64 *val)
570{
571 struct hci_dev *hdev = data;
572
573 hci_dev_lock(hdev);
574 *val = hdev->sniff_max_interval;
575 hci_dev_unlock(hdev);
576
577 return 0;
578}
579
580DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
581 sniff_max_interval_set, "%llu\n");
582
31ad1691
AK
583static int conn_info_min_age_set(void *data, u64 val)
584{
585 struct hci_dev *hdev = data;
586
587 if (val == 0 || val > hdev->conn_info_max_age)
588 return -EINVAL;
589
590 hci_dev_lock(hdev);
591 hdev->conn_info_min_age = val;
592 hci_dev_unlock(hdev);
593
594 return 0;
595}
596
597static int conn_info_min_age_get(void *data, u64 *val)
598{
599 struct hci_dev *hdev = data;
600
601 hci_dev_lock(hdev);
602 *val = hdev->conn_info_min_age;
603 hci_dev_unlock(hdev);
604
605 return 0;
606}
607
608DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
609 conn_info_min_age_set, "%llu\n");
610
611static int conn_info_max_age_set(void *data, u64 val)
612{
613 struct hci_dev *hdev = data;
614
615 if (val == 0 || val < hdev->conn_info_min_age)
616 return -EINVAL;
617
618 hci_dev_lock(hdev);
619 hdev->conn_info_max_age = val;
620 hci_dev_unlock(hdev);
621
622 return 0;
623}
624
625static int conn_info_max_age_get(void *data, u64 *val)
626{
627 struct hci_dev *hdev = data;
628
629 hci_dev_lock(hdev);
630 *val = hdev->conn_info_max_age;
631 hci_dev_unlock(hdev);
632
633 return 0;
634}
635
636DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
637 conn_info_max_age_set, "%llu\n");
638
ac345813
MH
639static int identity_show(struct seq_file *f, void *p)
640{
641 struct hci_dev *hdev = f->private;
a1f4c318 642 bdaddr_t addr;
ac345813
MH
643 u8 addr_type;
644
645 hci_dev_lock(hdev);
646
a1f4c318 647 hci_copy_identity_address(hdev, &addr, &addr_type);
ac345813 648
a1f4c318 649 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
473deef2 650 16, hdev->irk, &hdev->rpa);
ac345813
MH
651
652 hci_dev_unlock(hdev);
653
654 return 0;
655}
656
657static int identity_open(struct inode *inode, struct file *file)
658{
659 return single_open(file, identity_show, inode->i_private);
660}
661
662static const struct file_operations identity_fops = {
663 .open = identity_open,
664 .read = seq_read,
665 .llseek = seq_lseek,
666 .release = single_release,
667};
668
7a4cd51d
MH
669static int random_address_show(struct seq_file *f, void *p)
670{
671 struct hci_dev *hdev = f->private;
672
673 hci_dev_lock(hdev);
674 seq_printf(f, "%pMR\n", &hdev->random_addr);
675 hci_dev_unlock(hdev);
676
677 return 0;
678}
679
680static int random_address_open(struct inode *inode, struct file *file)
681{
682 return single_open(file, random_address_show, inode->i_private);
683}
684
685static const struct file_operations random_address_fops = {
686 .open = random_address_open,
687 .read = seq_read,
688 .llseek = seq_lseek,
689 .release = single_release,
690};
691
e7b8fc92
MH
692static int static_address_show(struct seq_file *f, void *p)
693{
694 struct hci_dev *hdev = f->private;
695
696 hci_dev_lock(hdev);
697 seq_printf(f, "%pMR\n", &hdev->static_addr);
698 hci_dev_unlock(hdev);
699
700 return 0;
701}
702
703static int static_address_open(struct inode *inode, struct file *file)
704{
705 return single_open(file, static_address_show, inode->i_private);
706}
707
708static const struct file_operations static_address_fops = {
709 .open = static_address_open,
710 .read = seq_read,
711 .llseek = seq_lseek,
712 .release = single_release,
713};
714
b32bba6c
MH
715static ssize_t force_static_address_read(struct file *file,
716 char __user *user_buf,
717 size_t count, loff_t *ppos)
92202185 718{
b32bba6c
MH
719 struct hci_dev *hdev = file->private_data;
720 char buf[3];
92202185 721
111902f7 722 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
b32bba6c
MH
723 buf[1] = '\n';
724 buf[2] = '\0';
725 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
92202185
MH
726}
727
b32bba6c
MH
728static ssize_t force_static_address_write(struct file *file,
729 const char __user *user_buf,
730 size_t count, loff_t *ppos)
92202185 731{
b32bba6c
MH
732 struct hci_dev *hdev = file->private_data;
733 char buf[32];
734 size_t buf_size = min(count, (sizeof(buf)-1));
735 bool enable;
92202185 736
b32bba6c
MH
737 if (test_bit(HCI_UP, &hdev->flags))
738 return -EBUSY;
92202185 739
b32bba6c
MH
740 if (copy_from_user(buf, user_buf, buf_size))
741 return -EFAULT;
742
743 buf[buf_size] = '\0';
744 if (strtobool(buf, &enable))
745 return -EINVAL;
746
111902f7 747 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
b32bba6c
MH
748 return -EALREADY;
749
111902f7 750 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
b32bba6c
MH
751
752 return count;
92202185
MH
753}
754
b32bba6c
MH
755static const struct file_operations force_static_address_fops = {
756 .open = simple_open,
757 .read = force_static_address_read,
758 .write = force_static_address_write,
759 .llseek = default_llseek,
760};
92202185 761
d2ab0ac1
MH
762static int white_list_show(struct seq_file *f, void *ptr)
763{
764 struct hci_dev *hdev = f->private;
765 struct bdaddr_list *b;
766
767 hci_dev_lock(hdev);
768 list_for_each_entry(b, &hdev->le_white_list, list)
769 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
770 hci_dev_unlock(hdev);
771
772 return 0;
773}
774
775static int white_list_open(struct inode *inode, struct file *file)
776{
777 return single_open(file, white_list_show, inode->i_private);
778}
779
780static const struct file_operations white_list_fops = {
781 .open = white_list_open,
782 .read = seq_read,
783 .llseek = seq_lseek,
784 .release = single_release,
785};
786
3698d704
MH
787static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
788{
789 struct hci_dev *hdev = f->private;
790 struct list_head *p, *n;
791
792 hci_dev_lock(hdev);
793 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
794 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
795 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
796 &irk->bdaddr, irk->addr_type,
797 16, irk->val, &irk->rpa);
798 }
799 hci_dev_unlock(hdev);
800
801 return 0;
802}
803
804static int identity_resolving_keys_open(struct inode *inode, struct file *file)
805{
806 return single_open(file, identity_resolving_keys_show,
807 inode->i_private);
808}
809
810static const struct file_operations identity_resolving_keys_fops = {
811 .open = identity_resolving_keys_open,
812 .read = seq_read,
813 .llseek = seq_lseek,
814 .release = single_release,
815};
816
8f8625cd
MH
817static int long_term_keys_show(struct seq_file *f, void *ptr)
818{
819 struct hci_dev *hdev = f->private;
820 struct list_head *p, *n;
821
822 hci_dev_lock(hdev);
f813f1be 823 list_for_each_safe(p, n, &hdev->long_term_keys) {
8f8625cd 824 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
fe39c7b2 825 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
8f8625cd
MH
826 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
827 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
fe39c7b2 828 __le64_to_cpu(ltk->rand), 16, ltk->val);
8f8625cd
MH
829 }
830 hci_dev_unlock(hdev);
831
832 return 0;
833}
834
835static int long_term_keys_open(struct inode *inode, struct file *file)
836{
837 return single_open(file, long_term_keys_show, inode->i_private);
838}
839
840static const struct file_operations long_term_keys_fops = {
841 .open = long_term_keys_open,
842 .read = seq_read,
843 .llseek = seq_lseek,
844 .release = single_release,
845};
846
4e70c7e7
MH
847static int conn_min_interval_set(void *data, u64 val)
848{
849 struct hci_dev *hdev = data;
850
851 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
852 return -EINVAL;
853
854 hci_dev_lock(hdev);
2be48b65 855 hdev->le_conn_min_interval = val;
4e70c7e7
MH
856 hci_dev_unlock(hdev);
857
858 return 0;
859}
860
861static int conn_min_interval_get(void *data, u64 *val)
862{
863 struct hci_dev *hdev = data;
864
865 hci_dev_lock(hdev);
866 *val = hdev->le_conn_min_interval;
867 hci_dev_unlock(hdev);
868
869 return 0;
870}
871
872DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
873 conn_min_interval_set, "%llu\n");
874
875static int conn_max_interval_set(void *data, u64 val)
876{
877 struct hci_dev *hdev = data;
878
879 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
880 return -EINVAL;
881
882 hci_dev_lock(hdev);
2be48b65 883 hdev->le_conn_max_interval = val;
4e70c7e7
MH
884 hci_dev_unlock(hdev);
885
886 return 0;
887}
888
889static int conn_max_interval_get(void *data, u64 *val)
890{
891 struct hci_dev *hdev = data;
892
893 hci_dev_lock(hdev);
894 *val = hdev->le_conn_max_interval;
895 hci_dev_unlock(hdev);
896
897 return 0;
898}
899
900DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
901 conn_max_interval_set, "%llu\n");
902
3f959d46
MH
903static int adv_channel_map_set(void *data, u64 val)
904{
905 struct hci_dev *hdev = data;
906
907 if (val < 0x01 || val > 0x07)
908 return -EINVAL;
909
910 hci_dev_lock(hdev);
911 hdev->le_adv_channel_map = val;
912 hci_dev_unlock(hdev);
913
914 return 0;
915}
916
917static int adv_channel_map_get(void *data, u64 *val)
918{
919 struct hci_dev *hdev = data;
920
921 hci_dev_lock(hdev);
922 *val = hdev->le_adv_channel_map;
923 hci_dev_unlock(hdev);
924
925 return 0;
926}
927
928DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
929 adv_channel_map_set, "%llu\n");
930
7d474e06
AG
931static int le_auto_conn_show(struct seq_file *sf, void *ptr)
932{
933 struct hci_dev *hdev = sf->private;
934 struct hci_conn_params *p;
935
936 hci_dev_lock(hdev);
937
938 list_for_each_entry(p, &hdev->le_conn_params, list) {
939 seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
940 p->auto_connect);
941 }
942
943 hci_dev_unlock(hdev);
944
945 return 0;
946}
947
948static int le_auto_conn_open(struct inode *inode, struct file *file)
949{
950 return single_open(file, le_auto_conn_show, inode->i_private);
951}
952
953static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
954 size_t count, loff_t *offset)
955{
956 struct seq_file *sf = file->private_data;
957 struct hci_dev *hdev = sf->private;
958 u8 auto_connect = 0;
959 bdaddr_t addr;
960 u8 addr_type;
961 char *buf;
962 int err = 0;
963 int n;
964
965 /* Don't allow partial write */
966 if (*offset != 0)
967 return -EINVAL;
968
969 if (count < 3)
970 return -EINVAL;
971
4408dd15
AG
972 buf = memdup_user(data, count);
973 if (IS_ERR(buf))
974 return PTR_ERR(buf);
7d474e06
AG
975
976 if (memcmp(buf, "add", 3) == 0) {
977 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
978 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
979 &addr.b[1], &addr.b[0], &addr_type,
980 &auto_connect);
981
982 if (n < 7) {
983 err = -EINVAL;
984 goto done;
985 }
986
987 hci_dev_lock(hdev);
988 err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
989 hdev->le_conn_min_interval,
990 hdev->le_conn_max_interval);
991 hci_dev_unlock(hdev);
992
993 if (err)
994 goto done;
995 } else if (memcmp(buf, "del", 3) == 0) {
996 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
997 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
998 &addr.b[1], &addr.b[0], &addr_type);
999
1000 if (n < 7) {
1001 err = -EINVAL;
1002 goto done;
1003 }
1004
1005 hci_dev_lock(hdev);
1006 hci_conn_params_del(hdev, &addr, addr_type);
1007 hci_dev_unlock(hdev);
1008 } else if (memcmp(buf, "clr", 3) == 0) {
1009 hci_dev_lock(hdev);
1010 hci_conn_params_clear(hdev);
1011 hci_pend_le_conns_clear(hdev);
1012 hci_update_background_scan(hdev);
1013 hci_dev_unlock(hdev);
1014 } else {
1015 err = -EINVAL;
1016 }
1017
1018done:
1019 kfree(buf);
1020
1021 if (err)
1022 return err;
1023 else
1024 return count;
1025}
1026
1027static const struct file_operations le_auto_conn_fops = {
1028 .open = le_auto_conn_open,
1029 .read = seq_read,
1030 .write = le_auto_conn_write,
1031 .llseek = seq_lseek,
1032 .release = single_release,
1033};
1034
1da177e4
LT
1035/* ---- HCI requests ---- */
1036
42c6b129 1037static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 1038{
42c6b129 1039 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
1040
1041 if (hdev->req_status == HCI_REQ_PEND) {
1042 hdev->req_result = result;
1043 hdev->req_status = HCI_REQ_DONE;
1044 wake_up_interruptible(&hdev->req_wait_q);
1045 }
1046}
1047
1048static void hci_req_cancel(struct hci_dev *hdev, int err)
1049{
1050 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1051
1052 if (hdev->req_status == HCI_REQ_PEND) {
1053 hdev->req_result = err;
1054 hdev->req_status = HCI_REQ_CANCELED;
1055 wake_up_interruptible(&hdev->req_wait_q);
1056 }
1057}
1058
77a63e0a
FW
1059static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1060 u8 event)
75e84b7c
JH
1061{
1062 struct hci_ev_cmd_complete *ev;
1063 struct hci_event_hdr *hdr;
1064 struct sk_buff *skb;
1065
1066 hci_dev_lock(hdev);
1067
1068 skb = hdev->recv_evt;
1069 hdev->recv_evt = NULL;
1070
1071 hci_dev_unlock(hdev);
1072
1073 if (!skb)
1074 return ERR_PTR(-ENODATA);
1075
1076 if (skb->len < sizeof(*hdr)) {
1077 BT_ERR("Too short HCI event");
1078 goto failed;
1079 }
1080
1081 hdr = (void *) skb->data;
1082 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1083
7b1abbbe
JH
1084 if (event) {
1085 if (hdr->evt != event)
1086 goto failed;
1087 return skb;
1088 }
1089
75e84b7c
JH
1090 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1091 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1092 goto failed;
1093 }
1094
1095 if (skb->len < sizeof(*ev)) {
1096 BT_ERR("Too short cmd_complete event");
1097 goto failed;
1098 }
1099
1100 ev = (void *) skb->data;
1101 skb_pull(skb, sizeof(*ev));
1102
1103 if (opcode == __le16_to_cpu(ev->opcode))
1104 return skb;
1105
1106 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1107 __le16_to_cpu(ev->opcode));
1108
1109failed:
1110 kfree_skb(skb);
1111 return ERR_PTR(-ENODATA);
1112}
1113
7b1abbbe 1114struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 1115 const void *param, u8 event, u32 timeout)
75e84b7c
JH
1116{
1117 DECLARE_WAITQUEUE(wait, current);
1118 struct hci_request req;
1119 int err = 0;
1120
1121 BT_DBG("%s", hdev->name);
1122
1123 hci_req_init(&req, hdev);
1124
7b1abbbe 1125 hci_req_add_ev(&req, opcode, plen, param, event);
75e84b7c
JH
1126
1127 hdev->req_status = HCI_REQ_PEND;
1128
1129 err = hci_req_run(&req, hci_req_sync_complete);
1130 if (err < 0)
1131 return ERR_PTR(err);
1132
1133 add_wait_queue(&hdev->req_wait_q, &wait);
1134 set_current_state(TASK_INTERRUPTIBLE);
1135
1136 schedule_timeout(timeout);
1137
1138 remove_wait_queue(&hdev->req_wait_q, &wait);
1139
1140 if (signal_pending(current))
1141 return ERR_PTR(-EINTR);
1142
1143 switch (hdev->req_status) {
1144 case HCI_REQ_DONE:
1145 err = -bt_to_errno(hdev->req_result);
1146 break;
1147
1148 case HCI_REQ_CANCELED:
1149 err = -hdev->req_result;
1150 break;
1151
1152 default:
1153 err = -ETIMEDOUT;
1154 break;
1155 }
1156
1157 hdev->req_status = hdev->req_result = 0;
1158
1159 BT_DBG("%s end: err %d", hdev->name, err);
1160
1161 if (err < 0)
1162 return ERR_PTR(err);
1163
7b1abbbe
JH
1164 return hci_get_cmd_complete(hdev, opcode, event);
1165}
1166EXPORT_SYMBOL(__hci_cmd_sync_ev);
1167
1168struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 1169 const void *param, u32 timeout)
7b1abbbe
JH
1170{
1171 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
75e84b7c
JH
1172}
1173EXPORT_SYMBOL(__hci_cmd_sync);
1174
1da177e4 1175/* Execute request and wait for completion. */
01178cd4 1176static int __hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
1177 void (*func)(struct hci_request *req,
1178 unsigned long opt),
01178cd4 1179 unsigned long opt, __u32 timeout)
1da177e4 1180{
42c6b129 1181 struct hci_request req;
1da177e4
LT
1182 DECLARE_WAITQUEUE(wait, current);
1183 int err = 0;
1184
1185 BT_DBG("%s start", hdev->name);
1186
42c6b129
JH
1187 hci_req_init(&req, hdev);
1188
1da177e4
LT
1189 hdev->req_status = HCI_REQ_PEND;
1190
42c6b129 1191 func(&req, opt);
53cce22d 1192
42c6b129
JH
1193 err = hci_req_run(&req, hci_req_sync_complete);
1194 if (err < 0) {
53cce22d 1195 hdev->req_status = 0;
920c8300
AG
1196
1197 /* ENODATA means the HCI request command queue is empty.
1198 * This can happen when a request with conditionals doesn't
1199 * trigger any commands to be sent. This is normal behavior
1200 * and should not trigger an error return.
42c6b129 1201 */
920c8300
AG
1202 if (err == -ENODATA)
1203 return 0;
1204
1205 return err;
53cce22d
JH
1206 }
1207
bc4445c7
AG
1208 add_wait_queue(&hdev->req_wait_q, &wait);
1209 set_current_state(TASK_INTERRUPTIBLE);
1210
1da177e4
LT
1211 schedule_timeout(timeout);
1212
1213 remove_wait_queue(&hdev->req_wait_q, &wait);
1214
1215 if (signal_pending(current))
1216 return -EINTR;
1217
1218 switch (hdev->req_status) {
1219 case HCI_REQ_DONE:
e175072f 1220 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
1221 break;
1222
1223 case HCI_REQ_CANCELED:
1224 err = -hdev->req_result;
1225 break;
1226
1227 default:
1228 err = -ETIMEDOUT;
1229 break;
3ff50b79 1230 }
1da177e4 1231
a5040efa 1232 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
1233
1234 BT_DBG("%s end: err %d", hdev->name, err);
1235
1236 return err;
1237}
1238
01178cd4 1239static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
1240 void (*req)(struct hci_request *req,
1241 unsigned long opt),
01178cd4 1242 unsigned long opt, __u32 timeout)
1da177e4
LT
1243{
1244 int ret;
1245
7c6a329e
MH
1246 if (!test_bit(HCI_UP, &hdev->flags))
1247 return -ENETDOWN;
1248
1da177e4
LT
1249 /* Serialize all requests */
1250 hci_req_lock(hdev);
01178cd4 1251 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
1252 hci_req_unlock(hdev);
1253
1254 return ret;
1255}
1256
42c6b129 1257static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 1258{
42c6b129 1259 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
1260
1261 /* Reset device */
42c6b129
JH
1262 set_bit(HCI_RESET, &req->hdev->flags);
1263 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
1264}
1265
42c6b129 1266static void bredr_init(struct hci_request *req)
1da177e4 1267{
42c6b129 1268 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 1269
1da177e4 1270 /* Read Local Supported Features */
42c6b129 1271 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 1272
1143e5a6 1273 /* Read Local Version */
42c6b129 1274 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
1275
1276 /* Read BD Address */
42c6b129 1277 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
1278}
1279
42c6b129 1280static void amp_init(struct hci_request *req)
e61ef499 1281{
42c6b129 1282 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 1283
e61ef499 1284 /* Read Local Version */
42c6b129 1285 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489 1286
f6996cfe
MH
1287 /* Read Local Supported Commands */
1288 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1289
1290 /* Read Local Supported Features */
1291 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1292
6bcbc489 1293 /* Read Local AMP Info */
42c6b129 1294 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
1295
1296 /* Read Data Blk size */
42c6b129 1297 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
7528ca1c 1298
f38ba941
MH
1299 /* Read Flow Control Mode */
1300 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1301
7528ca1c
MH
1302 /* Read Location Data */
1303 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
e61ef499
AE
1304}
1305
42c6b129 1306static void hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 1307{
42c6b129 1308 struct hci_dev *hdev = req->hdev;
e61ef499
AE
1309
1310 BT_DBG("%s %ld", hdev->name, opt);
1311
11778716
AE
1312 /* Reset */
1313 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 1314 hci_reset_req(req, 0);
11778716 1315
e61ef499
AE
1316 switch (hdev->dev_type) {
1317 case HCI_BREDR:
42c6b129 1318 bredr_init(req);
e61ef499
AE
1319 break;
1320
1321 case HCI_AMP:
42c6b129 1322 amp_init(req);
e61ef499
AE
1323 break;
1324
1325 default:
1326 BT_ERR("Unknown device type %d", hdev->dev_type);
1327 break;
1328 }
e61ef499
AE
1329}
1330
42c6b129 1331static void bredr_setup(struct hci_request *req)
2177bab5 1332{
4ca048e3
MH
1333 struct hci_dev *hdev = req->hdev;
1334
2177bab5
JH
1335 __le16 param;
1336 __u8 flt_type;
1337
1338 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 1339 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
1340
1341 /* Read Class of Device */
42c6b129 1342 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
1343
1344 /* Read Local Name */
42c6b129 1345 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
1346
1347 /* Read Voice Setting */
42c6b129 1348 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5 1349
b4cb9fb2
MH
1350 /* Read Number of Supported IAC */
1351 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1352
4b836f39
MH
1353 /* Read Current IAC LAP */
1354 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1355
2177bab5
JH
1356 /* Clear Event Filters */
1357 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 1358 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
1359
1360 /* Connection accept timeout ~20 secs */
dcf4adbf 1361 param = cpu_to_le16(0x7d00);
42c6b129 1362 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5 1363
4ca048e3
MH
1364 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1365 * but it does not support page scan related HCI commands.
1366 */
1367 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
f332ec66
JH
1368 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1369 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1370 }
2177bab5
JH
1371}
1372
42c6b129 1373static void le_setup(struct hci_request *req)
2177bab5 1374{
c73eee91
JH
1375 struct hci_dev *hdev = req->hdev;
1376
2177bab5 1377 /* Read LE Buffer Size */
42c6b129 1378 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
1379
1380 /* Read LE Local Supported Features */
42c6b129 1381 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
2177bab5 1382
747d3f03
MH
1383 /* Read LE Supported States */
1384 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1385
2177bab5 1386 /* Read LE Advertising Channel TX Power */
42c6b129 1387 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
2177bab5
JH
1388
1389 /* Read LE White List Size */
42c6b129 1390 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
2177bab5 1391
747d3f03
MH
1392 /* Clear LE White List */
1393 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
c73eee91
JH
1394
1395 /* LE-only controllers have LE implicitly enabled */
1396 if (!lmp_bredr_capable(hdev))
1397 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
2177bab5
JH
1398}
1399
1400static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1401{
1402 if (lmp_ext_inq_capable(hdev))
1403 return 0x02;
1404
1405 if (lmp_inq_rssi_capable(hdev))
1406 return 0x01;
1407
1408 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1409 hdev->lmp_subver == 0x0757)
1410 return 0x01;
1411
1412 if (hdev->manufacturer == 15) {
1413 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1414 return 0x01;
1415 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1416 return 0x01;
1417 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1418 return 0x01;
1419 }
1420
1421 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1422 hdev->lmp_subver == 0x1805)
1423 return 0x01;
1424
1425 return 0x00;
1426}
1427
42c6b129 1428static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
1429{
1430 u8 mode;
1431
42c6b129 1432 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 1433
42c6b129 1434 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
1435}
1436
42c6b129 1437static void hci_setup_event_mask(struct hci_request *req)
2177bab5 1438{
42c6b129
JH
1439 struct hci_dev *hdev = req->hdev;
1440
2177bab5
JH
1441 /* The second byte is 0xff instead of 0x9f (two reserved bits
1442 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1443 * command otherwise.
1444 */
1445 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1446
1447 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1448 * any event mask for pre 1.2 devices.
1449 */
1450 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1451 return;
1452
1453 if (lmp_bredr_capable(hdev)) {
1454 events[4] |= 0x01; /* Flow Specification Complete */
1455 events[4] |= 0x02; /* Inquiry Result with RSSI */
1456 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1457 events[5] |= 0x08; /* Synchronous Connection Complete */
1458 events[5] |= 0x10; /* Synchronous Connection Changed */
c7882cbd
MH
1459 } else {
1460 /* Use a different default for LE-only devices */
1461 memset(events, 0, sizeof(events));
1462 events[0] |= 0x10; /* Disconnection Complete */
1463 events[0] |= 0x80; /* Encryption Change */
1464 events[1] |= 0x08; /* Read Remote Version Information Complete */
1465 events[1] |= 0x20; /* Command Complete */
1466 events[1] |= 0x40; /* Command Status */
1467 events[1] |= 0x80; /* Hardware Error */
1468 events[2] |= 0x04; /* Number of Completed Packets */
1469 events[3] |= 0x02; /* Data Buffer Overflow */
1470 events[5] |= 0x80; /* Encryption Key Refresh Complete */
2177bab5
JH
1471 }
1472
1473 if (lmp_inq_rssi_capable(hdev))
1474 events[4] |= 0x02; /* Inquiry Result with RSSI */
1475
1476 if (lmp_sniffsubr_capable(hdev))
1477 events[5] |= 0x20; /* Sniff Subrating */
1478
1479 if (lmp_pause_enc_capable(hdev))
1480 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1481
1482 if (lmp_ext_inq_capable(hdev))
1483 events[5] |= 0x40; /* Extended Inquiry Result */
1484
1485 if (lmp_no_flush_capable(hdev))
1486 events[7] |= 0x01; /* Enhanced Flush Complete */
1487
1488 if (lmp_lsto_capable(hdev))
1489 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1490
1491 if (lmp_ssp_capable(hdev)) {
1492 events[6] |= 0x01; /* IO Capability Request */
1493 events[6] |= 0x02; /* IO Capability Response */
1494 events[6] |= 0x04; /* User Confirmation Request */
1495 events[6] |= 0x08; /* User Passkey Request */
1496 events[6] |= 0x10; /* Remote OOB Data Request */
1497 events[6] |= 0x20; /* Simple Pairing Complete */
1498 events[7] |= 0x04; /* User Passkey Notification */
1499 events[7] |= 0x08; /* Keypress Notification */
1500 events[7] |= 0x10; /* Remote Host Supported
1501 * Features Notification
1502 */
1503 }
1504
1505 if (lmp_le_capable(hdev))
1506 events[7] |= 0x20; /* LE Meta-Event */
1507
42c6b129 1508 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
2177bab5
JH
1509
1510 if (lmp_le_capable(hdev)) {
1511 memset(events, 0, sizeof(events));
1512 events[0] = 0x1f;
42c6b129
JH
1513 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1514 sizeof(events), events);
2177bab5
JH
1515 }
1516}
1517
42c6b129 1518static void hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 1519{
42c6b129
JH
1520 struct hci_dev *hdev = req->hdev;
1521
2177bab5 1522 if (lmp_bredr_capable(hdev))
42c6b129 1523 bredr_setup(req);
56f87901
JH
1524 else
1525 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2177bab5
JH
1526
1527 if (lmp_le_capable(hdev))
42c6b129 1528 le_setup(req);
2177bab5 1529
42c6b129 1530 hci_setup_event_mask(req);
2177bab5 1531
3f8e2d75
JH
1532 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1533 * local supported commands HCI command.
1534 */
1535 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
42c6b129 1536 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
1537
1538 if (lmp_ssp_capable(hdev)) {
57af75a8
MH
1539 /* When SSP is available, then the host features page
1540 * should also be available as well. However some
1541 * controllers list the max_page as 0 as long as SSP
1542 * has not been enabled. To achieve proper debugging
1543 * output, force the minimum max_page to 1 at least.
1544 */
1545 hdev->max_page = 0x01;
1546
2177bab5
JH
1547 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1548 u8 mode = 0x01;
42c6b129
JH
1549 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1550 sizeof(mode), &mode);
2177bab5
JH
1551 } else {
1552 struct hci_cp_write_eir cp;
1553
1554 memset(hdev->eir, 0, sizeof(hdev->eir));
1555 memset(&cp, 0, sizeof(cp));
1556
42c6b129 1557 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
1558 }
1559 }
1560
1561 if (lmp_inq_rssi_capable(hdev))
42c6b129 1562 hci_setup_inquiry_mode(req);
2177bab5
JH
1563
1564 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 1565 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
1566
1567 if (lmp_ext_feat_capable(hdev)) {
1568 struct hci_cp_read_local_ext_features cp;
1569
1570 cp.page = 0x01;
42c6b129
JH
1571 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1572 sizeof(cp), &cp);
2177bab5
JH
1573 }
1574
1575 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1576 u8 enable = 1;
42c6b129
JH
1577 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1578 &enable);
2177bab5
JH
1579 }
1580}
1581
42c6b129 1582static void hci_setup_link_policy(struct hci_request *req)
2177bab5 1583{
42c6b129 1584 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1585 struct hci_cp_write_def_link_policy cp;
1586 u16 link_policy = 0;
1587
1588 if (lmp_rswitch_capable(hdev))
1589 link_policy |= HCI_LP_RSWITCH;
1590 if (lmp_hold_capable(hdev))
1591 link_policy |= HCI_LP_HOLD;
1592 if (lmp_sniff_capable(hdev))
1593 link_policy |= HCI_LP_SNIFF;
1594 if (lmp_park_capable(hdev))
1595 link_policy |= HCI_LP_PARK;
1596
1597 cp.policy = cpu_to_le16(link_policy);
42c6b129 1598 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
1599}
1600
42c6b129 1601static void hci_set_le_support(struct hci_request *req)
2177bab5 1602{
42c6b129 1603 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1604 struct hci_cp_write_le_host_supported cp;
1605
c73eee91
JH
1606 /* LE-only devices do not support explicit enablement */
1607 if (!lmp_bredr_capable(hdev))
1608 return;
1609
2177bab5
JH
1610 memset(&cp, 0, sizeof(cp));
1611
1612 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1613 cp.le = 0x01;
1614 cp.simul = lmp_le_br_capable(hdev);
1615 }
1616
1617 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
1618 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1619 &cp);
2177bab5
JH
1620}
1621
d62e6d67
JH
1622static void hci_set_event_mask_page_2(struct hci_request *req)
1623{
1624 struct hci_dev *hdev = req->hdev;
1625 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1626
1627 /* If Connectionless Slave Broadcast master role is supported
1628 * enable all necessary events for it.
1629 */
53b834d2 1630 if (lmp_csb_master_capable(hdev)) {
d62e6d67
JH
1631 events[1] |= 0x40; /* Triggered Clock Capture */
1632 events[1] |= 0x80; /* Synchronization Train Complete */
1633 events[2] |= 0x10; /* Slave Page Response Timeout */
1634 events[2] |= 0x20; /* CSB Channel Map Change */
1635 }
1636
1637 /* If Connectionless Slave Broadcast slave role is supported
1638 * enable all necessary events for it.
1639 */
53b834d2 1640 if (lmp_csb_slave_capable(hdev)) {
d62e6d67
JH
1641 events[2] |= 0x01; /* Synchronization Train Received */
1642 events[2] |= 0x02; /* CSB Receive */
1643 events[2] |= 0x04; /* CSB Timeout */
1644 events[2] |= 0x08; /* Truncated Page Complete */
1645 }
1646
40c59fcb
MH
1647 /* Enable Authenticated Payload Timeout Expired event if supported */
1648 if (lmp_ping_capable(hdev))
1649 events[2] |= 0x80;
1650
d62e6d67
JH
1651 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1652}
1653
42c6b129 1654static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 1655{
42c6b129 1656 struct hci_dev *hdev = req->hdev;
d2c5d77f 1657 u8 p;
42c6b129 1658
b8f4e068
GP
1659 /* Some Broadcom based Bluetooth controllers do not support the
1660 * Delete Stored Link Key command. They are clearly indicating its
1661 * absence in the bit mask of supported commands.
1662 *
1663 * Check the supported commands and only if the the command is marked
1664 * as supported send it. If not supported assume that the controller
1665 * does not have actual support for stored link keys which makes this
1666 * command redundant anyway.
f9f462fa
MH
1667 *
1668 * Some controllers indicate that they support handling deleting
1669 * stored link keys, but they don't. The quirk lets a driver
1670 * just disable this command.
637b4cae 1671 */
f9f462fa
MH
1672 if (hdev->commands[6] & 0x80 &&
1673 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
59f45d57
JH
1674 struct hci_cp_delete_stored_link_key cp;
1675
1676 bacpy(&cp.bdaddr, BDADDR_ANY);
1677 cp.delete_all = 0x01;
1678 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1679 sizeof(cp), &cp);
1680 }
1681
2177bab5 1682 if (hdev->commands[5] & 0x10)
42c6b129 1683 hci_setup_link_policy(req);
2177bab5 1684
7bf32048 1685 if (lmp_le_capable(hdev))
42c6b129 1686 hci_set_le_support(req);
d2c5d77f
JH
1687
1688 /* Read features beyond page 1 if available */
1689 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1690 struct hci_cp_read_local_ext_features cp;
1691
1692 cp.page = p;
1693 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1694 sizeof(cp), &cp);
1695 }
2177bab5
JH
1696}
1697
5d4e7e8d
JH
1698static void hci_init4_req(struct hci_request *req, unsigned long opt)
1699{
1700 struct hci_dev *hdev = req->hdev;
1701
d62e6d67
JH
1702 /* Set event mask page 2 if the HCI command for it is supported */
1703 if (hdev->commands[22] & 0x04)
1704 hci_set_event_mask_page_2(req);
1705
5d4e7e8d 1706 /* Check for Synchronization Train support */
53b834d2 1707 if (lmp_sync_train_capable(hdev))
5d4e7e8d 1708 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
a6d0d690
MH
1709
1710 /* Enable Secure Connections if supported and configured */
5afeac14 1711 if ((lmp_sc_capable(hdev) ||
111902f7 1712 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
a6d0d690
MH
1713 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1714 u8 support = 0x01;
1715 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1716 sizeof(support), &support);
1717 }
5d4e7e8d
JH
1718}
1719
2177bab5
JH
1720static int __hci_init(struct hci_dev *hdev)
1721{
1722 int err;
1723
1724 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1725 if (err < 0)
1726 return err;
1727
4b4148e9
MH
1728 /* The Device Under Test (DUT) mode is special and available for
1729 * all controller types. So just create it early on.
1730 */
1731 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1732 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1733 &dut_mode_fops);
1734 }
1735
2177bab5
JH
1736 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1737 * BR/EDR/LE type controllers. AMP controllers only need the
1738 * first stage init.
1739 */
1740 if (hdev->dev_type != HCI_BREDR)
1741 return 0;
1742
1743 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1744 if (err < 0)
1745 return err;
1746
5d4e7e8d
JH
1747 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1748 if (err < 0)
1749 return err;
1750
baf27f6e
MH
1751 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1752 if (err < 0)
1753 return err;
1754
1755 /* Only create debugfs entries during the initial setup
1756 * phase and not every time the controller gets powered on.
1757 */
1758 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1759 return 0;
1760
dfb826a8
MH
1761 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1762 &features_fops);
ceeb3bc0
MH
1763 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1764 &hdev->manufacturer);
1765 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1766 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
70afe0b8
MH
1767 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1768 &blacklist_fops);
47219839
MH
1769 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1770
31ad1691
AK
1771 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1772 &conn_info_min_age_fops);
1773 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1774 &conn_info_max_age_fops);
1775
baf27f6e
MH
1776 if (lmp_bredr_capable(hdev)) {
1777 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1778 hdev, &inquiry_cache_fops);
02d08d15
MH
1779 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1780 hdev, &link_keys_fops);
babdbb3c
MH
1781 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1782 hdev, &dev_class_fops);
041000b9
MH
1783 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1784 hdev, &voice_setting_fops);
baf27f6e
MH
1785 }
1786
06f5b778 1787 if (lmp_ssp_capable(hdev)) {
ebd1e33b
MH
1788 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1789 hdev, &auto_accept_delay_fops);
06f5b778
MH
1790 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1791 hdev, &ssp_debug_mode_fops);
5afeac14
MH
1792 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1793 hdev, &force_sc_support_fops);
134c2a89
MH
1794 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1795 hdev, &sc_only_mode_fops);
06f5b778 1796 }
ebd1e33b 1797
2bfa3531
MH
1798 if (lmp_sniff_capable(hdev)) {
1799 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1800 hdev, &idle_timeout_fops);
1801 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1802 hdev, &sniff_min_interval_fops);
1803 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1804 hdev, &sniff_max_interval_fops);
1805 }
1806
d0f729b8 1807 if (lmp_le_capable(hdev)) {
ac345813
MH
1808 debugfs_create_file("identity", 0400, hdev->debugfs,
1809 hdev, &identity_fops);
1810 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1811 hdev, &rpa_timeout_fops);
7a4cd51d
MH
1812 debugfs_create_file("random_address", 0444, hdev->debugfs,
1813 hdev, &random_address_fops);
b32bba6c
MH
1814 debugfs_create_file("static_address", 0444, hdev->debugfs,
1815 hdev, &static_address_fops);
1816
1817 /* For controllers with a public address, provide a debug
1818 * option to force the usage of the configured static
1819 * address. By default the public address is used.
1820 */
1821 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1822 debugfs_create_file("force_static_address", 0644,
1823 hdev->debugfs, hdev,
1824 &force_static_address_fops);
1825
d0f729b8
MH
1826 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1827 &hdev->le_white_list_size);
d2ab0ac1
MH
1828 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1829 &white_list_fops);
3698d704
MH
1830 debugfs_create_file("identity_resolving_keys", 0400,
1831 hdev->debugfs, hdev,
1832 &identity_resolving_keys_fops);
8f8625cd
MH
1833 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1834 hdev, &long_term_keys_fops);
4e70c7e7
MH
1835 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1836 hdev, &conn_min_interval_fops);
1837 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1838 hdev, &conn_max_interval_fops);
3f959d46
MH
1839 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1840 hdev, &adv_channel_map_fops);
7d474e06
AG
1841 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1842 &le_auto_conn_fops);
b9a7a61e
LR
1843 debugfs_create_u16("discov_interleaved_timeout", 0644,
1844 hdev->debugfs,
1845 &hdev->discov_interleaved_timeout);
d0f729b8 1846 }
e7b8fc92 1847
baf27f6e 1848 return 0;
2177bab5
JH
1849}
1850
42c6b129 1851static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1852{
1853 __u8 scan = opt;
1854
42c6b129 1855 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1856
1857 /* Inquiry and Page scans */
42c6b129 1858 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1859}
1860
42c6b129 1861static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1862{
1863 __u8 auth = opt;
1864
42c6b129 1865 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1866
1867 /* Authentication */
42c6b129 1868 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1869}
1870
42c6b129 1871static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1872{
1873 __u8 encrypt = opt;
1874
42c6b129 1875 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1876
e4e8e37c 1877 /* Encryption */
42c6b129 1878 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1879}
1880
42c6b129 1881static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1882{
1883 __le16 policy = cpu_to_le16(opt);
1884
42c6b129 1885 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1886
1887 /* Default link policy */
42c6b129 1888 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1889}
1890
8e87d142 1891/* Get HCI device by index.
1da177e4
LT
1892 * Device is held on return. */
1893struct hci_dev *hci_dev_get(int index)
1894{
8035ded4 1895 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1896
1897 BT_DBG("%d", index);
1898
1899 if (index < 0)
1900 return NULL;
1901
1902 read_lock(&hci_dev_list_lock);
8035ded4 1903 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1904 if (d->id == index) {
1905 hdev = hci_dev_hold(d);
1906 break;
1907 }
1908 }
1909 read_unlock(&hci_dev_list_lock);
1910 return hdev;
1911}
1da177e4
LT
1912
1913/* ---- Inquiry support ---- */
ff9ef578 1914
30dc78e1
JH
1915bool hci_discovery_active(struct hci_dev *hdev)
1916{
1917 struct discovery_state *discov = &hdev->discovery;
1918
6fbe195d 1919 switch (discov->state) {
343f935b 1920 case DISCOVERY_FINDING:
6fbe195d 1921 case DISCOVERY_RESOLVING:
30dc78e1
JH
1922 return true;
1923
6fbe195d
AG
1924 default:
1925 return false;
1926 }
30dc78e1
JH
1927}
1928
ff9ef578
JH
1929void hci_discovery_set_state(struct hci_dev *hdev, int state)
1930{
1931 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1932
1933 if (hdev->discovery.state == state)
1934 return;
1935
1936 switch (state) {
1937 case DISCOVERY_STOPPED:
c54c3860
AG
1938 hci_update_background_scan(hdev);
1939
7b99b659
AG
1940 if (hdev->discovery.state != DISCOVERY_STARTING)
1941 mgmt_discovering(hdev, 0);
ff9ef578
JH
1942 break;
1943 case DISCOVERY_STARTING:
1944 break;
343f935b 1945 case DISCOVERY_FINDING:
ff9ef578
JH
1946 mgmt_discovering(hdev, 1);
1947 break;
30dc78e1
JH
1948 case DISCOVERY_RESOLVING:
1949 break;
ff9ef578
JH
1950 case DISCOVERY_STOPPING:
1951 break;
1952 }
1953
1954 hdev->discovery.state = state;
1955}
1956
1f9b9a5d 1957void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1958{
30883512 1959 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1960 struct inquiry_entry *p, *n;
1da177e4 1961
561aafbc
JH
1962 list_for_each_entry_safe(p, n, &cache->all, all) {
1963 list_del(&p->all);
b57c1a56 1964 kfree(p);
1da177e4 1965 }
561aafbc
JH
1966
1967 INIT_LIST_HEAD(&cache->unknown);
1968 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1969}
1970
a8c5fb1a
GP
1971struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1972 bdaddr_t *bdaddr)
1da177e4 1973{
30883512 1974 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1975 struct inquiry_entry *e;
1976
6ed93dc6 1977 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1978
561aafbc
JH
1979 list_for_each_entry(e, &cache->all, all) {
1980 if (!bacmp(&e->data.bdaddr, bdaddr))
1981 return e;
1982 }
1983
1984 return NULL;
1985}
1986
1987struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1988 bdaddr_t *bdaddr)
561aafbc 1989{
30883512 1990 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1991 struct inquiry_entry *e;
1992
6ed93dc6 1993 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1994
1995 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1996 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1997 return e;
1998 }
1999
2000 return NULL;
1da177e4
LT
2001}
2002
30dc78e1 2003struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
2004 bdaddr_t *bdaddr,
2005 int state)
30dc78e1
JH
2006{
2007 struct discovery_state *cache = &hdev->discovery;
2008 struct inquiry_entry *e;
2009
6ed93dc6 2010 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
2011
2012 list_for_each_entry(e, &cache->resolve, list) {
2013 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2014 return e;
2015 if (!bacmp(&e->data.bdaddr, bdaddr))
2016 return e;
2017 }
2018
2019 return NULL;
2020}
2021
a3d4e20a 2022void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 2023 struct inquiry_entry *ie)
a3d4e20a
JH
2024{
2025 struct discovery_state *cache = &hdev->discovery;
2026 struct list_head *pos = &cache->resolve;
2027 struct inquiry_entry *p;
2028
2029 list_del(&ie->list);
2030
2031 list_for_each_entry(p, &cache->resolve, list) {
2032 if (p->name_state != NAME_PENDING &&
a8c5fb1a 2033 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
2034 break;
2035 pos = &p->list;
2036 }
2037
2038 list_add(&ie->list, pos);
2039}
2040
3175405b 2041bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 2042 bool name_known, bool *ssp)
1da177e4 2043{
30883512 2044 struct discovery_state *cache = &hdev->discovery;
70f23020 2045 struct inquiry_entry *ie;
1da177e4 2046
6ed93dc6 2047 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 2048
2b2fec4d
SJ
2049 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2050
01735bbd 2051 *ssp = data->ssp_mode;
388fc8fa 2052
70f23020 2053 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 2054 if (ie) {
8002d77c 2055 if (ie->data.ssp_mode)
388fc8fa
JH
2056 *ssp = true;
2057
a3d4e20a 2058 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2059 data->rssi != ie->data.rssi) {
a3d4e20a
JH
2060 ie->data.rssi = data->rssi;
2061 hci_inquiry_cache_update_resolve(hdev, ie);
2062 }
2063
561aafbc 2064 goto update;
a3d4e20a 2065 }
561aafbc
JH
2066
2067 /* Entry not in the cache. Add new one. */
2068 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2069 if (!ie)
3175405b 2070 return false;
561aafbc
JH
2071
2072 list_add(&ie->all, &cache->all);
2073
2074 if (name_known) {
2075 ie->name_state = NAME_KNOWN;
2076 } else {
2077 ie->name_state = NAME_NOT_KNOWN;
2078 list_add(&ie->list, &cache->unknown);
2079 }
70f23020 2080
561aafbc
JH
2081update:
2082 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2083 ie->name_state != NAME_PENDING) {
561aafbc
JH
2084 ie->name_state = NAME_KNOWN;
2085 list_del(&ie->list);
1da177e4
LT
2086 }
2087
70f23020
AE
2088 memcpy(&ie->data, data, sizeof(*data));
2089 ie->timestamp = jiffies;
1da177e4 2090 cache->timestamp = jiffies;
3175405b
JH
2091
2092 if (ie->name_state == NAME_NOT_KNOWN)
2093 return false;
2094
2095 return true;
1da177e4
LT
2096}
2097
2098static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2099{
30883512 2100 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2101 struct inquiry_info *info = (struct inquiry_info *) buf;
2102 struct inquiry_entry *e;
2103 int copied = 0;
2104
561aafbc 2105 list_for_each_entry(e, &cache->all, all) {
1da177e4 2106 struct inquiry_data *data = &e->data;
b57c1a56
JH
2107
2108 if (copied >= num)
2109 break;
2110
1da177e4
LT
2111 bacpy(&info->bdaddr, &data->bdaddr);
2112 info->pscan_rep_mode = data->pscan_rep_mode;
2113 info->pscan_period_mode = data->pscan_period_mode;
2114 info->pscan_mode = data->pscan_mode;
2115 memcpy(info->dev_class, data->dev_class, 3);
2116 info->clock_offset = data->clock_offset;
b57c1a56 2117
1da177e4 2118 info++;
b57c1a56 2119 copied++;
1da177e4
LT
2120 }
2121
2122 BT_DBG("cache %p, copied %d", cache, copied);
2123 return copied;
2124}
2125
42c6b129 2126static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2127{
2128 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2129 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2130 struct hci_cp_inquiry cp;
2131
2132 BT_DBG("%s", hdev->name);
2133
2134 if (test_bit(HCI_INQUIRY, &hdev->flags))
2135 return;
2136
2137 /* Start Inquiry */
2138 memcpy(&cp.lap, &ir->lap, 3);
2139 cp.length = ir->length;
2140 cp.num_rsp = ir->num_rsp;
42c6b129 2141 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2142}
2143
3e13fa1e
AG
/* Action function for wait_on_bit() on HCI_INQUIRY: yield the CPU and
 * report whether a signal is pending so the wait can be interrupted.
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
2149
1da177e4
LT
2150int hci_inquiry(void __user *arg)
2151{
2152 __u8 __user *ptr = arg;
2153 struct hci_inquiry_req ir;
2154 struct hci_dev *hdev;
2155 int err = 0, do_inquiry = 0, max_rsp;
2156 long timeo;
2157 __u8 *buf;
2158
2159 if (copy_from_user(&ir, ptr, sizeof(ir)))
2160 return -EFAULT;
2161
5a08ecce
AE
2162 hdev = hci_dev_get(ir.dev_id);
2163 if (!hdev)
1da177e4
LT
2164 return -ENODEV;
2165
0736cfa8
MH
2166 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2167 err = -EBUSY;
2168 goto done;
2169 }
2170
5b69bef5
MH
2171 if (hdev->dev_type != HCI_BREDR) {
2172 err = -EOPNOTSUPP;
2173 goto done;
2174 }
2175
56f87901
JH
2176 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2177 err = -EOPNOTSUPP;
2178 goto done;
2179 }
2180
09fd0de5 2181 hci_dev_lock(hdev);
8e87d142 2182 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2183 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2184 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2185 do_inquiry = 1;
2186 }
09fd0de5 2187 hci_dev_unlock(hdev);
1da177e4 2188
04837f64 2189 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2190
2191 if (do_inquiry) {
01178cd4
JH
2192 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2193 timeo);
70f23020
AE
2194 if (err < 0)
2195 goto done;
3e13fa1e
AG
2196
2197 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2198 * cleared). If it is interrupted by a signal, return -EINTR.
2199 */
2200 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2201 TASK_INTERRUPTIBLE))
2202 return -EINTR;
70f23020 2203 }
1da177e4 2204
8fc9ced3
GP
2205 /* for unlimited number of responses we will use buffer with
2206 * 255 entries
2207 */
1da177e4
LT
2208 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2209
2210 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2211 * copy it to the user space.
2212 */
01df8c31 2213 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2214 if (!buf) {
1da177e4
LT
2215 err = -ENOMEM;
2216 goto done;
2217 }
2218
09fd0de5 2219 hci_dev_lock(hdev);
1da177e4 2220 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2221 hci_dev_unlock(hdev);
1da177e4
LT
2222
2223 BT_DBG("num_rsp %d", ir.num_rsp);
2224
2225 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2226 ptr += sizeof(ir);
2227 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2228 ir.num_rsp))
1da177e4 2229 err = -EFAULT;
8e87d142 2230 } else
1da177e4
LT
2231 err = -EFAULT;
2232
2233 kfree(buf);
2234
2235done:
2236 hci_dev_put(hdev);
2237 return err;
2238}
2239
/* Bring the controller up: driver open, optional vendor setup, HCI init
 * sequence, and power-on notification to the management interface.
 * On any init failure all queues/works are flushed and the driver is
 * closed again.  Called with the hci_dev reference held by the caller;
 * takes the request lock for the whole sequence.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered; refuse to power it up */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Driver transport open; everything below must be undone on error */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Vendor-specific setup runs only during the initial setup phase */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw and user-channel devices skip the HCI init sequence */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2347
cbed0ca1
JH
2348/* ---- HCI ioctl helpers ---- */
2349
/* Power up device @dev on behalf of the HCIDEVUP ioctl.  Flushes the
 * request workqueue first so that any in-flight setup/power work has
 * finished before hci_dev_do_open() evaluates its error conditions.
 * Returns 0 or a negative errno from hci_dev_do_open().
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
2379
1da177e4
LT
/* Take the controller down: cancel timers and delayed work, flush the
 * RX/TX/cmd works, flush connection and inquiry state, optionally send
 * HCI_Reset, drop all queues and the last sent command, and finally
 * close the driver transport.  The ordering here is deliberate —
 * works are flushed before queues are purged so nothing re-queues
 * behind our back.  Drops the reference taken at open time.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Not up: only the command timer needs stopping */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2482
2483int hci_dev_close(__u16 dev)
2484{
2485 struct hci_dev *hdev;
2486 int err;
2487
70f23020
AE
2488 hdev = hci_dev_get(dev);
2489 if (!hdev)
1da177e4 2490 return -ENODEV;
8ee56540 2491
0736cfa8
MH
2492 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2493 err = -EBUSY;
2494 goto done;
2495 }
2496
8ee56540
MH
2497 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2498 cancel_delayed_work(&hdev->power_off);
2499
1da177e4 2500 err = hci_dev_do_close(hdev);
8ee56540 2501
0736cfa8 2502done:
1da177e4
LT
2503 hci_dev_put(hdev);
2504 return err;
2505}
2506
/* Handle the HCIDEVRESET ioctl: drop all pending traffic, flush the
 * inquiry cache and connection hash, reset the flow-control counters
 * and (for non-raw devices) issue an HCI_Reset.  The device must be up.
 * Returns 0 or a negative errno (-ENODEV, -ENETDOWN, -EBUSY).
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart command flow control and zero the per-type quotas */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2551
2552int hci_dev_reset_stat(__u16 dev)
2553{
2554 struct hci_dev *hdev;
2555 int ret = 0;
2556
70f23020
AE
2557 hdev = hci_dev_get(dev);
2558 if (!hdev)
1da177e4
LT
2559 return -ENODEV;
2560
0736cfa8
MH
2561 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2562 ret = -EBUSY;
2563 goto done;
2564 }
2565
1da177e4
LT
2566 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2567
0736cfa8 2568done:
1da177e4 2569 hci_dev_put(hdev);
1da177e4
LT
2570 return ret;
2571}
2572
/* Dispatch the HCISET* family of ioctls (@cmd) for the device named in
 * the user-supplied struct hci_dev_req.  Only valid for powered BR/EDR
 * controllers that are not claimed by a user channel.
 * Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high half, packet count low */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2663
/* Handle the HCIGETDEVLIST ioctl: copy id/flags pairs for up to the
 * user-requested number of registered controllers back to user space.
 * Returns 0, -EFAULT, -EINVAL or -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation; zero entries makes no sense */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2710
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the
 * requested controller and copy it to user space.  For LE-only
 * controllers the ACL fields carry the LE buffer parameters and the
 * SCO fields are zeroed.  Returns 0, -EFAULT or -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; next two bits: controller type */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2759
2760/* ---- Interface to HCI drivers ---- */
2761
611b30f7
MH
2762static int hci_rfkill_set_block(void *data, bool blocked)
2763{
2764 struct hci_dev *hdev = data;
2765
2766 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2767
0736cfa8
MH
2768 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2769 return -EBUSY;
2770
5e130367
JH
2771 if (blocked) {
2772 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
2773 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2774 hci_dev_do_close(hdev);
5e130367
JH
2775 } else {
2776 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2777 }
611b30f7
MH
2778
2779 return 0;
2780}
2781
/* rfkill operations for HCI controllers; only blocking is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2785
ab81cbf9
JH
/* Deferred power-on work: open the device, then re-check the error
 * conditions that were ignored during setup (rfkill, missing address)
 * and power back off if they still hold.  Schedules the auto-power-off
 * timer and announces the new index to mgmt once setup completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2817
2818static void hci_power_off(struct work_struct *work)
2819{
3243553f 2820 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2821 power_off.work);
ab81cbf9
JH
2822
2823 BT_DBG("%s", hdev->name);
2824
8ee56540 2825 hci_dev_do_close(hdev);
ab81cbf9
JH
2826}
2827
16ab91ab
JH
2828static void hci_discov_off(struct work_struct *work)
2829{
2830 struct hci_dev *hdev;
16ab91ab
JH
2831
2832 hdev = container_of(work, struct hci_dev, discov_off.work);
2833
2834 BT_DBG("%s", hdev->name);
2835
d1967ff8 2836 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2837}
2838
35f7498a 2839void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2840{
4821002c 2841 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2842
4821002c
JH
2843 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2844 list_del(&uuid->list);
2aeb9a1a
JH
2845 kfree(uuid);
2846 }
2aeb9a1a
JH
2847}
2848
35f7498a 2849void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
2850{
2851 struct list_head *p, *n;
2852
2853 list_for_each_safe(p, n, &hdev->link_keys) {
2854 struct link_key *key;
2855
2856 key = list_entry(p, struct link_key, list);
2857
2858 list_del(p);
2859 kfree(key);
2860 }
55ed8ca1
JH
2861}
2862
35f7498a 2863void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
2864{
2865 struct smp_ltk *k, *tmp;
2866
2867 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2868 list_del(&k->list);
2869 kfree(k);
2870 }
b899efaf
VCG
2871}
2872
970c4e46
JH
2873void hci_smp_irks_clear(struct hci_dev *hdev)
2874{
2875 struct smp_irk *k, *tmp;
2876
2877 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2878 list_del(&k->list);
2879 kfree(k);
2880 }
2881}
2882
55ed8ca1
JH
2883struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2884{
8035ded4 2885 struct link_key *k;
55ed8ca1 2886
8035ded4 2887 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2888 if (bacmp(bdaddr, &k->bdaddr) == 0)
2889 return k;
55ed8ca1
JH
2890
2891 return NULL;
2892}
2893
745c0ce3 2894static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2895 u8 key_type, u8 old_key_type)
d25e28ab
JH
2896{
2897 /* Legacy key */
2898 if (key_type < 0x03)
745c0ce3 2899 return true;
d25e28ab
JH
2900
2901 /* Debug keys are insecure so don't store them persistently */
2902 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2903 return false;
d25e28ab
JH
2904
2905 /* Changed combination key and there's no previous one */
2906 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2907 return false;
d25e28ab
JH
2908
2909 /* Security mode 3 case */
2910 if (!conn)
745c0ce3 2911 return true;
d25e28ab
JH
2912
2913 /* Neither local nor remote side had no-bonding as requirement */
2914 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2915 return true;
d25e28ab
JH
2916
2917 /* Local side had dedicated bonding as requirement */
2918 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2919 return true;
d25e28ab
JH
2920
2921 /* Remote side had dedicated bonding as requirement */
2922 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2923 return true;
d25e28ab
JH
2924
2925 /* If none of the above criteria match, then don't store the key
2926 * persistently */
745c0ce3 2927 return false;
d25e28ab
JH
2928}
2929
98a0b845
JH
2930static bool ltk_type_master(u8 type)
2931{
d97c9fb0 2932 return (type == SMP_LTK);
98a0b845
JH
2933}
2934
fe39c7b2 2935struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
98a0b845 2936 bool master)
75d262c2 2937{
c9839a11 2938 struct smp_ltk *k;
75d262c2 2939
c9839a11 2940 list_for_each_entry(k, &hdev->long_term_keys, list) {
fe39c7b2 2941 if (k->ediv != ediv || k->rand != rand)
75d262c2
VCG
2942 continue;
2943
98a0b845
JH
2944 if (ltk_type_master(k->type) != master)
2945 continue;
2946
c9839a11 2947 return k;
75d262c2
VCG
2948 }
2949
2950 return NULL;
2951}
75d262c2 2952
c9839a11 2953struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 2954 u8 addr_type, bool master)
75d262c2 2955{
c9839a11 2956 struct smp_ltk *k;
75d262c2 2957
c9839a11
VCG
2958 list_for_each_entry(k, &hdev->long_term_keys, list)
2959 if (addr_type == k->bdaddr_type &&
98a0b845
JH
2960 bacmp(bdaddr, &k->bdaddr) == 0 &&
2961 ltk_type_master(k->type) == master)
75d262c2
VCG
2962 return k;
2963
2964 return NULL;
2965}
75d262c2 2966
970c4e46
JH
/* Find the identity resolving key matching resolvable private address
 * @rpa.  First tries the cached rpa of each IRK; failing that, runs the
 * crypto check via smp_irk_matches() and caches the rpa on a hit.
 * Returns NULL if no IRK resolves the address.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	/* Fast path: the rpa was resolved to this IRK before */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	/* Slow path: try to resolve the rpa against every stored IRK */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
2985
2986struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2987 u8 addr_type)
2988{
2989 struct smp_irk *irk;
2990
6cfc9988
JH
2991 /* Identity Address must be public or static random */
2992 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2993 return NULL;
2994
970c4e46
JH
2995 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2996 if (addr_type == irk->addr_type &&
2997 bacmp(bdaddr, &irk->bdaddr) == 0)
2998 return irk;
2999 }
3000
3001 return NULL;
3002}
3003
567fa2aa
JH
/* Store (or update) the BR/EDR link key for @bdaddr.  When @new_key is
 * set, notifies mgmt and records on @conn whether the key should be
 * flushed on disconnect (i.e. it is non-persistent).  Returns the
 * stored key, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  int new_key, bdaddr_t *bdaddr, u8 *val,
				  u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return key;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return key;
}
3057
ca9142b8 3058struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 3059 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 3060 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 3061{
c9839a11 3062 struct smp_ltk *key, *old_key;
98a0b845 3063 bool master = ltk_type_master(type);
75d262c2 3064
98a0b845 3065 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
c9839a11 3066 if (old_key)
75d262c2 3067 key = old_key;
c9839a11 3068 else {
0a14ab41 3069 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3070 if (!key)
ca9142b8 3071 return NULL;
c9839a11 3072 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
3073 }
3074
75d262c2 3075 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
3076 key->bdaddr_type = addr_type;
3077 memcpy(key->val, tk, sizeof(key->val));
3078 key->authenticated = authenticated;
3079 key->ediv = ediv;
fe39c7b2 3080 key->rand = rand;
c9839a11
VCG
3081 key->enc_size = enc_size;
3082 key->type = type;
75d262c2 3083
ca9142b8 3084 return key;
75d262c2
VCG
3085}
3086
ca9142b8
JH
3087struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3088 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
3089{
3090 struct smp_irk *irk;
3091
3092 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3093 if (!irk) {
3094 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3095 if (!irk)
ca9142b8 3096 return NULL;
970c4e46
JH
3097
3098 bacpy(&irk->bdaddr, bdaddr);
3099 irk->addr_type = addr_type;
3100
3101 list_add(&irk->list, &hdev->identity_resolving_keys);
3102 }
3103
3104 memcpy(irk->val, val, 16);
3105 bacpy(&irk->rpa, rpa);
3106
ca9142b8 3107 return irk;
970c4e46
JH
3108}
3109
55ed8ca1
JH
3110int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3111{
3112 struct link_key *key;
3113
3114 key = hci_find_link_key(hdev, bdaddr);
3115 if (!key)
3116 return -ENOENT;
3117
6ed93dc6 3118 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
3119
3120 list_del(&key->list);
3121 kfree(key);
3122
3123 return 0;
3124}
3125
e0b2b27e 3126int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
3127{
3128 struct smp_ltk *k, *tmp;
c51ffa0b 3129 int removed = 0;
b899efaf
VCG
3130
3131 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 3132 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3133 continue;
3134
6ed93dc6 3135 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
3136
3137 list_del(&k->list);
3138 kfree(k);
c51ffa0b 3139 removed++;
b899efaf
VCG
3140 }
3141
c51ffa0b 3142 return removed ? 0 : -ENOENT;
b899efaf
VCG
3143}
3144
a7ec7338
JH
3145void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3146{
3147 struct smp_irk *k, *tmp;
3148
668b7b19 3149 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3150 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3151 continue;
3152
3153 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3154
3155 list_del(&k->list);
3156 kfree(k);
3157 }
3158}
3159
/* HCI command timer function */
/* Fires when a sent HCI command got no completion in time: log the
 * stuck opcode (if any), reopen the command credit window and kick the
 * command work so queued commands can proceed.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3178
2763eda6 3179struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3180 bdaddr_t *bdaddr)
2763eda6
SJ
3181{
3182 struct oob_data *data;
3183
3184 list_for_each_entry(data, &hdev->remote_oob_data, list)
3185 if (bacmp(bdaddr, &data->bdaddr) == 0)
3186 return data;
3187
3188 return NULL;
3189}
3190
3191int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3192{
3193 struct oob_data *data;
3194
3195 data = hci_find_remote_oob_data(hdev, bdaddr);
3196 if (!data)
3197 return -ENOENT;
3198
6ed93dc6 3199 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3200
3201 list_del(&data->list);
3202 kfree(data);
3203
3204 return 0;
3205}
3206
35f7498a 3207void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3208{
3209 struct oob_data *data, *n;
3210
3211 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3212 list_del(&data->list);
3213 kfree(data);
3214 }
2763eda6
SJ
3215}
3216
0798872e
MH
/* Store (or update) P-192 out-of-band pairing data for @bdaddr.  The
 * P-256 variants are zeroed since this entry point only carries the
 * 192-bit values.  Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3242
3243int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3244 u8 *hash192, u8 *randomizer192,
3245 u8 *hash256, u8 *randomizer256)
3246{
3247 struct oob_data *data;
3248
3249 data = hci_find_remote_oob_data(hdev, bdaddr);
3250 if (!data) {
0a14ab41 3251 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3252 if (!data)
3253 return -ENOMEM;
3254
3255 bacpy(&data->bdaddr, bdaddr);
3256 list_add(&data->list, &hdev->remote_oob_data);
3257 }
3258
3259 memcpy(data->hash192, hash192, sizeof(data->hash192));
3260 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3261
3262 memcpy(data->hash256, hash256, sizeof(data->hash256));
3263 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3264
6ed93dc6 3265 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3266
3267 return 0;
3268}
3269
b9ee0a78
MH
3270struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3271 bdaddr_t *bdaddr, u8 type)
b2a66aad 3272{
8035ded4 3273 struct bdaddr_list *b;
b2a66aad 3274
b9ee0a78
MH
3275 list_for_each_entry(b, &hdev->blacklist, list) {
3276 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3277 return b;
b9ee0a78 3278 }
b2a66aad
AJ
3279
3280 return NULL;
3281}
3282
c9507490 3283static void hci_blacklist_clear(struct hci_dev *hdev)
b2a66aad
AJ
3284{
3285 struct list_head *p, *n;
3286
3287 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 3288 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3289
3290 list_del(p);
3291 kfree(b);
3292 }
b2a66aad
AJ
3293}
3294
88c1fe4b 3295int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3296{
3297 struct bdaddr_list *entry;
b2a66aad 3298
b9ee0a78 3299 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3300 return -EBADF;
3301
b9ee0a78 3302 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 3303 return -EEXIST;
b2a66aad
AJ
3304
3305 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
3306 if (!entry)
3307 return -ENOMEM;
b2a66aad
AJ
3308
3309 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3310 entry->bdaddr_type = type;
b2a66aad
AJ
3311
3312 list_add(&entry->list, &hdev->blacklist);
3313
88c1fe4b 3314 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
3315}
3316
88c1fe4b 3317int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3318{
3319 struct bdaddr_list *entry;
b2a66aad 3320
35f7498a
JH
3321 if (!bacmp(bdaddr, BDADDR_ANY)) {
3322 hci_blacklist_clear(hdev);
3323 return 0;
3324 }
b2a66aad 3325
b9ee0a78 3326 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 3327 if (!entry)
5e762444 3328 return -ENOENT;
b2a66aad
AJ
3329
3330 list_del(&entry->list);
3331 kfree(entry);
3332
88c1fe4b 3333 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
3334}
3335
d2ab0ac1
MH
3336struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3337 bdaddr_t *bdaddr, u8 type)
3338{
3339 struct bdaddr_list *b;
3340
3341 list_for_each_entry(b, &hdev->le_white_list, list) {
3342 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3343 return b;
3344 }
3345
3346 return NULL;
3347}
3348
3349void hci_white_list_clear(struct hci_dev *hdev)
3350{
3351 struct list_head *p, *n;
3352
3353 list_for_each_safe(p, n, &hdev->le_white_list) {
3354 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3355
3356 list_del(p);
3357 kfree(b);
3358 }
3359}
3360
3361int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3362{
3363 struct bdaddr_list *entry;
3364
3365 if (!bacmp(bdaddr, BDADDR_ANY))
3366 return -EBADF;
3367
3368 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3369 if (!entry)
3370 return -ENOMEM;
3371
3372 bacpy(&entry->bdaddr, bdaddr);
3373 entry->bdaddr_type = type;
3374
3375 list_add(&entry->list, &hdev->le_white_list);
3376
3377 return 0;
3378}
3379
3380int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3381{
3382 struct bdaddr_list *entry;
3383
3384 if (!bacmp(bdaddr, BDADDR_ANY))
3385 return -EBADF;
3386
3387 entry = hci_white_list_lookup(hdev, bdaddr, type);
3388 if (!entry)
3389 return -ENOENT;
3390
3391 list_del(&entry->list);
3392 kfree(entry);
3393
3394 return 0;
3395}
3396
15819a70
AG
3397/* This function requires the caller holds hdev->lock */
3398struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3399 bdaddr_t *addr, u8 addr_type)
3400{
3401 struct hci_conn_params *params;
3402
3403 list_for_each_entry(params, &hdev->le_conn_params, list) {
3404 if (bacmp(&params->addr, addr) == 0 &&
3405 params->addr_type == addr_type) {
3406 return params;
3407 }
3408 }
3409
3410 return NULL;
3411}
3412
cef952ce
AG
3413static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3414{
3415 struct hci_conn *conn;
3416
3417 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3418 if (!conn)
3419 return false;
3420
3421 if (conn->dst_type != type)
3422 return false;
3423
3424 if (conn->state != BT_CONNECTED)
3425 return false;
3426
3427 return true;
3428}
3429
a9b0a04c
AG
3430static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3431{
3432 if (addr_type == ADDR_LE_DEV_PUBLIC)
3433 return true;
3434
3435 /* Check for Random Static address type */
3436 if ((addr->b[5] & 0xc0) == 0xc0)
3437 return true;
3438
3439 return false;
3440}
3441
15819a70 3442/* This function requires the caller holds hdev->lock */
a9b0a04c
AG
3443int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3444 u8 auto_connect, u16 conn_min_interval,
3445 u16 conn_max_interval)
15819a70
AG
3446{
3447 struct hci_conn_params *params;
3448
a9b0a04c
AG
3449 if (!is_identity_address(addr, addr_type))
3450 return -EINVAL;
3451
15819a70 3452 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce
AG
3453 if (params)
3454 goto update;
15819a70
AG
3455
3456 params = kzalloc(sizeof(*params), GFP_KERNEL);
3457 if (!params) {
3458 BT_ERR("Out of memory");
a9b0a04c 3459 return -ENOMEM;
15819a70
AG
3460 }
3461
3462 bacpy(&params->addr, addr);
3463 params->addr_type = addr_type;
cef952ce
AG
3464
3465 list_add(&params->list, &hdev->le_conn_params);
3466
3467update:
15819a70
AG
3468 params->conn_min_interval = conn_min_interval;
3469 params->conn_max_interval = conn_max_interval;
9fcb18ef 3470 params->auto_connect = auto_connect;
15819a70 3471
cef952ce
AG
3472 switch (auto_connect) {
3473 case HCI_AUTO_CONN_DISABLED:
3474 case HCI_AUTO_CONN_LINK_LOSS:
3475 hci_pend_le_conn_del(hdev, addr, addr_type);
3476 break;
3477 case HCI_AUTO_CONN_ALWAYS:
3478 if (!is_connected(hdev, addr, addr_type))
3479 hci_pend_le_conn_add(hdev, addr, addr_type);
3480 break;
3481 }
15819a70 3482
9fcb18ef
AG
3483 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3484 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3485 conn_min_interval, conn_max_interval);
a9b0a04c
AG
3486
3487 return 0;
15819a70
AG
3488}
3489
3490/* This function requires the caller holds hdev->lock */
3491void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3492{
3493 struct hci_conn_params *params;
3494
3495 params = hci_conn_params_lookup(hdev, addr, addr_type);
3496 if (!params)
3497 return;
3498
cef952ce
AG
3499 hci_pend_le_conn_del(hdev, addr, addr_type);
3500
15819a70
AG
3501 list_del(&params->list);
3502 kfree(params);
3503
3504 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3505}
3506
3507/* This function requires the caller holds hdev->lock */
3508void hci_conn_params_clear(struct hci_dev *hdev)
3509{
3510 struct hci_conn_params *params, *tmp;
3511
3512 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3513 list_del(&params->list);
3514 kfree(params);
3515 }
3516
3517 BT_DBG("All LE connection parameters were removed");
3518}
3519
77a77a30
AG
3520/* This function requires the caller holds hdev->lock */
3521struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3522 bdaddr_t *addr, u8 addr_type)
3523{
3524 struct bdaddr_list *entry;
3525
3526 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3527 if (bacmp(&entry->bdaddr, addr) == 0 &&
3528 entry->bdaddr_type == addr_type)
3529 return entry;
3530 }
3531
3532 return NULL;
3533}
3534
3535/* This function requires the caller holds hdev->lock */
3536void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3537{
3538 struct bdaddr_list *entry;
3539
3540 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3541 if (entry)
a4790dbd 3542 goto done;
77a77a30
AG
3543
3544 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3545 if (!entry) {
3546 BT_ERR("Out of memory");
3547 return;
3548 }
3549
3550 bacpy(&entry->bdaddr, addr);
3551 entry->bdaddr_type = addr_type;
3552
3553 list_add(&entry->list, &hdev->pend_le_conns);
3554
3555 BT_DBG("addr %pMR (type %u)", addr, addr_type);
a4790dbd
AG
3556
3557done:
3558 hci_update_background_scan(hdev);
77a77a30
AG
3559}
3560
3561/* This function requires the caller holds hdev->lock */
3562void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3563{
3564 struct bdaddr_list *entry;
3565
3566 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3567 if (!entry)
a4790dbd 3568 goto done;
77a77a30
AG
3569
3570 list_del(&entry->list);
3571 kfree(entry);
3572
3573 BT_DBG("addr %pMR (type %u)", addr, addr_type);
a4790dbd
AG
3574
3575done:
3576 hci_update_background_scan(hdev);
77a77a30
AG
3577}
3578
3579/* This function requires the caller holds hdev->lock */
3580void hci_pend_le_conns_clear(struct hci_dev *hdev)
3581{
3582 struct bdaddr_list *entry, *tmp;
3583
3584 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3585 list_del(&entry->list);
3586 kfree(entry);
3587 }
3588
3589 BT_DBG("All LE pending connections cleared");
3590}
3591
4c87eaab 3592static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3593{
4c87eaab
AG
3594 if (status) {
3595 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3596
4c87eaab
AG
3597 hci_dev_lock(hdev);
3598 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3599 hci_dev_unlock(hdev);
3600 return;
3601 }
7ba8b4be
AG
3602}
3603
4c87eaab 3604static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3605{
4c87eaab
AG
3606 /* General inquiry access code (GIAC) */
3607 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3608 struct hci_request req;
3609 struct hci_cp_inquiry cp;
7ba8b4be
AG
3610 int err;
3611
4c87eaab
AG
3612 if (status) {
3613 BT_ERR("Failed to disable LE scanning: status %d", status);
3614 return;
3615 }
7ba8b4be 3616
4c87eaab
AG
3617 switch (hdev->discovery.type) {
3618 case DISCOV_TYPE_LE:
3619 hci_dev_lock(hdev);
3620 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3621 hci_dev_unlock(hdev);
3622 break;
7ba8b4be 3623
4c87eaab
AG
3624 case DISCOV_TYPE_INTERLEAVED:
3625 hci_req_init(&req, hdev);
7ba8b4be 3626
4c87eaab
AG
3627 memset(&cp, 0, sizeof(cp));
3628 memcpy(&cp.lap, lap, sizeof(cp.lap));
3629 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3630 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3631
4c87eaab 3632 hci_dev_lock(hdev);
7dbfac1d 3633
4c87eaab 3634 hci_inquiry_cache_flush(hdev);
7dbfac1d 3635
4c87eaab
AG
3636 err = hci_req_run(&req, inquiry_complete);
3637 if (err) {
3638 BT_ERR("Inquiry request failed: err %d", err);
3639 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3640 }
7dbfac1d 3641
4c87eaab
AG
3642 hci_dev_unlock(hdev);
3643 break;
7dbfac1d 3644 }
7dbfac1d
AG
3645}
3646
7ba8b4be
AG
3647static void le_scan_disable_work(struct work_struct *work)
3648{
3649 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3650 le_scan_disable.work);
4c87eaab
AG
3651 struct hci_request req;
3652 int err;
7ba8b4be
AG
3653
3654 BT_DBG("%s", hdev->name);
3655
4c87eaab 3656 hci_req_init(&req, hdev);
28b75a89 3657
b1efcc28 3658 hci_req_add_le_scan_disable(&req);
28b75a89 3659
4c87eaab
AG
3660 err = hci_req_run(&req, le_scan_disable_work_complete);
3661 if (err)
3662 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3663}
3664
8d97250e
JH
3665static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3666{
3667 struct hci_dev *hdev = req->hdev;
3668
3669 /* If we're advertising or initiating an LE connection we can't
3670 * go ahead and change the random address at this time. This is
3671 * because the eventual initiator address used for the
3672 * subsequently created connection will be undefined (some
3673 * controllers use the new address and others the one we had
3674 * when the operation started).
3675 *
3676 * In this kind of scenario skip the update and let the random
3677 * address be updated at the next cycle.
3678 */
3679 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3680 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3681 BT_DBG("Deferring random address update");
3682 return;
3683 }
3684
3685 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3686}
3687
94b1fc92
MH
3688int hci_update_random_address(struct hci_request *req, bool require_privacy,
3689 u8 *own_addr_type)
ebd3a747
JH
3690{
3691 struct hci_dev *hdev = req->hdev;
3692 int err;
3693
3694 /* If privacy is enabled use a resolvable private address. If
2b5224dc
MH
3695 * current RPA has expired or there is something else than
3696 * the current RPA in use, then generate a new one.
ebd3a747
JH
3697 */
3698 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3699 int to;
3700
3701 *own_addr_type = ADDR_LE_DEV_RANDOM;
3702
3703 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3704 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3705 return 0;
3706
2b5224dc 3707 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
ebd3a747
JH
3708 if (err < 0) {
3709 BT_ERR("%s failed to generate new RPA", hdev->name);
3710 return err;
3711 }
3712
8d97250e 3713 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3714
3715 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3716 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3717
3718 return 0;
94b1fc92
MH
3719 }
3720
3721 /* In case of required privacy without resolvable private address,
3722 * use an unresolvable private address. This is useful for active
3723 * scanning and non-connectable advertising.
3724 */
3725 if (require_privacy) {
3726 bdaddr_t urpa;
3727
3728 get_random_bytes(&urpa, 6);
3729 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3730
3731 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3732 set_random_addr(req, &urpa);
94b1fc92 3733 return 0;
ebd3a747
JH
3734 }
3735
3736 /* If forcing static address is in use or there is no public
3737 * address use the static address as random address (but skip
3738 * the HCI command if the current random address is already the
3739 * static one.
3740 */
111902f7 3741 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3742 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3743 *own_addr_type = ADDR_LE_DEV_RANDOM;
3744 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3745 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3746 &hdev->static_addr);
3747 return 0;
3748 }
3749
3750 /* Neither privacy nor static address is being used so use a
3751 * public address.
3752 */
3753 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3754
3755 return 0;
3756}
3757
a1f4c318
JH
3758/* Copy the Identity Address of the controller.
3759 *
3760 * If the controller has a public BD_ADDR, then by default use that one.
3761 * If this is a LE only controller without a public address, default to
3762 * the static random address.
3763 *
3764 * For debugging purposes it is possible to force controllers with a
3765 * public address to use the static random address instead.
3766 */
3767void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3768 u8 *bdaddr_type)
3769{
111902f7 3770 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3771 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3772 bacpy(bdaddr, &hdev->static_addr);
3773 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3774 } else {
3775 bacpy(bdaddr, &hdev->bdaddr);
3776 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3777 }
3778}
3779
9be0dab7
DH
3780/* Alloc HCI device */
3781struct hci_dev *hci_alloc_dev(void)
3782{
3783 struct hci_dev *hdev;
3784
3785 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3786 if (!hdev)
3787 return NULL;
3788
b1b813d4
DH
3789 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3790 hdev->esco_type = (ESCO_HV1);
3791 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3792 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3793 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
3794 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3795 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3796
b1b813d4
DH
3797 hdev->sniff_max_interval = 800;
3798 hdev->sniff_min_interval = 80;
3799
3f959d46 3800 hdev->le_adv_channel_map = 0x07;
bef64738
MH
3801 hdev->le_scan_interval = 0x0060;
3802 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3803 hdev->le_conn_min_interval = 0x0028;
3804 hdev->le_conn_max_interval = 0x0038;
bef64738 3805
d6bfd59c 3806 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3807 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3808 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3809 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3810
b1b813d4
DH
3811 mutex_init(&hdev->lock);
3812 mutex_init(&hdev->req_lock);
3813
3814 INIT_LIST_HEAD(&hdev->mgmt_pending);
3815 INIT_LIST_HEAD(&hdev->blacklist);
3816 INIT_LIST_HEAD(&hdev->uuids);
3817 INIT_LIST_HEAD(&hdev->link_keys);
3818 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3819 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3820 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3821 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3822 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3823 INIT_LIST_HEAD(&hdev->pend_le_conns);
6b536b5e 3824 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3825
3826 INIT_WORK(&hdev->rx_work, hci_rx_work);
3827 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3828 INIT_WORK(&hdev->tx_work, hci_tx_work);
3829 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3830
b1b813d4
DH
3831 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3832 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3833 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3834
b1b813d4
DH
3835 skb_queue_head_init(&hdev->rx_q);
3836 skb_queue_head_init(&hdev->cmd_q);
3837 skb_queue_head_init(&hdev->raw_q);
3838
3839 init_waitqueue_head(&hdev->req_wait_q);
3840
65cc2b49 3841 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3842
b1b813d4
DH
3843 hci_init_sysfs(hdev);
3844 discovery_init(hdev);
9be0dab7
DH
3845
3846 return hdev;
3847}
3848EXPORT_SYMBOL(hci_alloc_dev);
3849
3850/* Free HCI device */
3851void hci_free_dev(struct hci_dev *hdev)
3852{
9be0dab7
DH
3853 /* will free via device release */
3854 put_device(&hdev->dev);
3855}
3856EXPORT_SYMBOL(hci_free_dev);
3857
1da177e4
LT
3858/* Register HCI device */
3859int hci_register_dev(struct hci_dev *hdev)
3860{
b1b813d4 3861 int id, error;
1da177e4 3862
010666a1 3863 if (!hdev->open || !hdev->close)
1da177e4
LT
3864 return -EINVAL;
3865
08add513
MM
3866 /* Do not allow HCI_AMP devices to register at index 0,
3867 * so the index can be used as the AMP controller ID.
3868 */
3df92b31
SL
3869 switch (hdev->dev_type) {
3870 case HCI_BREDR:
3871 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3872 break;
3873 case HCI_AMP:
3874 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3875 break;
3876 default:
3877 return -EINVAL;
1da177e4 3878 }
8e87d142 3879
3df92b31
SL
3880 if (id < 0)
3881 return id;
3882
1da177e4
LT
3883 sprintf(hdev->name, "hci%d", id);
3884 hdev->id = id;
2d8b3a11
AE
3885
3886 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3887
d8537548
KC
3888 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3889 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3890 if (!hdev->workqueue) {
3891 error = -ENOMEM;
3892 goto err;
3893 }
f48fd9c8 3894
d8537548
KC
3895 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3896 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3897 if (!hdev->req_workqueue) {
3898 destroy_workqueue(hdev->workqueue);
3899 error = -ENOMEM;
3900 goto err;
3901 }
3902
0153e2ec
MH
3903 if (!IS_ERR_OR_NULL(bt_debugfs))
3904 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3905
bdc3e0f1
MH
3906 dev_set_name(&hdev->dev, "%s", hdev->name);
3907
99780a7b
JH
3908 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3909 CRYPTO_ALG_ASYNC);
3910 if (IS_ERR(hdev->tfm_aes)) {
3911 BT_ERR("Unable to create crypto context");
3912 error = PTR_ERR(hdev->tfm_aes);
3913 hdev->tfm_aes = NULL;
3914 goto err_wqueue;
3915 }
3916
bdc3e0f1 3917 error = device_add(&hdev->dev);
33ca954d 3918 if (error < 0)
99780a7b 3919 goto err_tfm;
1da177e4 3920
611b30f7 3921 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3922 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3923 hdev);
611b30f7
MH
3924 if (hdev->rfkill) {
3925 if (rfkill_register(hdev->rfkill) < 0) {
3926 rfkill_destroy(hdev->rfkill);
3927 hdev->rfkill = NULL;
3928 }
3929 }
3930
5e130367
JH
3931 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3932 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3933
a8b2d5c2 3934 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3935 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3936
01cd3404 3937 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3938 /* Assume BR/EDR support until proven otherwise (such as
3939 * through reading supported features during init.
3940 */
3941 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3942 }
ce2be9ac 3943
fcee3377
GP
3944 write_lock(&hci_dev_list_lock);
3945 list_add(&hdev->list, &hci_dev_list);
3946 write_unlock(&hci_dev_list_lock);
3947
1da177e4 3948 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3949 hci_dev_hold(hdev);
1da177e4 3950
19202573 3951 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3952
1da177e4 3953 return id;
f48fd9c8 3954
99780a7b
JH
3955err_tfm:
3956 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
3957err_wqueue:
3958 destroy_workqueue(hdev->workqueue);
6ead1bbc 3959 destroy_workqueue(hdev->req_workqueue);
33ca954d 3960err:
3df92b31 3961 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3962
33ca954d 3963 return error;
1da177e4
LT
3964}
3965EXPORT_SYMBOL(hci_register_dev);
3966
3967/* Unregister HCI device */
59735631 3968void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3969{
3df92b31 3970 int i, id;
ef222013 3971
c13854ce 3972 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3973
94324962
JH
3974 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3975
3df92b31
SL
3976 id = hdev->id;
3977
f20d09d5 3978 write_lock(&hci_dev_list_lock);
1da177e4 3979 list_del(&hdev->list);
f20d09d5 3980 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3981
3982 hci_dev_do_close(hdev);
3983
cd4c5391 3984 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
3985 kfree_skb(hdev->reassembly[i]);
3986
b9b5ef18
GP
3987 cancel_work_sync(&hdev->power_on);
3988
ab81cbf9 3989 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 3990 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 3991 hci_dev_lock(hdev);
744cf19e 3992 mgmt_index_removed(hdev);
09fd0de5 3993 hci_dev_unlock(hdev);
56e5cb86 3994 }
ab81cbf9 3995
2e58ef3e
JH
3996 /* mgmt_index_removed should take care of emptying the
3997 * pending list */
3998 BUG_ON(!list_empty(&hdev->mgmt_pending));
3999
1da177e4
LT
4000 hci_notify(hdev, HCI_DEV_UNREG);
4001
611b30f7
MH
4002 if (hdev->rfkill) {
4003 rfkill_unregister(hdev->rfkill);
4004 rfkill_destroy(hdev->rfkill);
4005 }
4006
99780a7b
JH
4007 if (hdev->tfm_aes)
4008 crypto_free_blkcipher(hdev->tfm_aes);
4009
bdc3e0f1 4010 device_del(&hdev->dev);
147e2d59 4011
0153e2ec
MH
4012 debugfs_remove_recursive(hdev->debugfs);
4013
f48fd9c8 4014 destroy_workqueue(hdev->workqueue);
6ead1bbc 4015 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4016
09fd0de5 4017 hci_dev_lock(hdev);
e2e0cacb 4018 hci_blacklist_clear(hdev);
2aeb9a1a 4019 hci_uuids_clear(hdev);
55ed8ca1 4020 hci_link_keys_clear(hdev);
b899efaf 4021 hci_smp_ltks_clear(hdev);
970c4e46 4022 hci_smp_irks_clear(hdev);
2763eda6 4023 hci_remote_oob_data_clear(hdev);
d2ab0ac1 4024 hci_white_list_clear(hdev);
15819a70 4025 hci_conn_params_clear(hdev);
77a77a30 4026 hci_pend_le_conns_clear(hdev);
09fd0de5 4027 hci_dev_unlock(hdev);
e2e0cacb 4028
dc946bd8 4029 hci_dev_put(hdev);
3df92b31
SL
4030
4031 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4032}
4033EXPORT_SYMBOL(hci_unregister_dev);
4034
4035/* Suspend HCI device */
4036int hci_suspend_dev(struct hci_dev *hdev)
4037{
4038 hci_notify(hdev, HCI_DEV_SUSPEND);
4039 return 0;
4040}
4041EXPORT_SYMBOL(hci_suspend_dev);
4042
4043/* Resume HCI device */
4044int hci_resume_dev(struct hci_dev *hdev)
4045{
4046 hci_notify(hdev, HCI_DEV_RESUME);
4047 return 0;
4048}
4049EXPORT_SYMBOL(hci_resume_dev);
4050
76bca880 4051/* Receive frame from HCI drivers */
e1a26170 4052int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4053{
76bca880 4054 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4055 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4056 kfree_skb(skb);
4057 return -ENXIO;
4058 }
4059
d82603c6 4060 /* Incoming skb */
76bca880
MH
4061 bt_cb(skb)->incoming = 1;
4062
4063 /* Time stamp */
4064 __net_timestamp(skb);
4065
76bca880 4066 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4067 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4068
76bca880
MH
4069 return 0;
4070}
4071EXPORT_SYMBOL(hci_recv_frame);
4072
33e882a5 4073static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4074 int count, __u8 index)
33e882a5
SS
4075{
4076 int len = 0;
4077 int hlen = 0;
4078 int remain = count;
4079 struct sk_buff *skb;
4080 struct bt_skb_cb *scb;
4081
4082 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4083 index >= NUM_REASSEMBLY)
33e882a5
SS
4084 return -EILSEQ;
4085
4086 skb = hdev->reassembly[index];
4087
4088 if (!skb) {
4089 switch (type) {
4090 case HCI_ACLDATA_PKT:
4091 len = HCI_MAX_FRAME_SIZE;
4092 hlen = HCI_ACL_HDR_SIZE;
4093 break;
4094 case HCI_EVENT_PKT:
4095 len = HCI_MAX_EVENT_SIZE;
4096 hlen = HCI_EVENT_HDR_SIZE;
4097 break;
4098 case HCI_SCODATA_PKT:
4099 len = HCI_MAX_SCO_SIZE;
4100 hlen = HCI_SCO_HDR_SIZE;
4101 break;
4102 }
4103
1e429f38 4104 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4105 if (!skb)
4106 return -ENOMEM;
4107
4108 scb = (void *) skb->cb;
4109 scb->expect = hlen;
4110 scb->pkt_type = type;
4111
33e882a5
SS
4112 hdev->reassembly[index] = skb;
4113 }
4114
4115 while (count) {
4116 scb = (void *) skb->cb;
89bb46d0 4117 len = min_t(uint, scb->expect, count);
33e882a5
SS
4118
4119 memcpy(skb_put(skb, len), data, len);
4120
4121 count -= len;
4122 data += len;
4123 scb->expect -= len;
4124 remain = count;
4125
4126 switch (type) {
4127 case HCI_EVENT_PKT:
4128 if (skb->len == HCI_EVENT_HDR_SIZE) {
4129 struct hci_event_hdr *h = hci_event_hdr(skb);
4130 scb->expect = h->plen;
4131
4132 if (skb_tailroom(skb) < scb->expect) {
4133 kfree_skb(skb);
4134 hdev->reassembly[index] = NULL;
4135 return -ENOMEM;
4136 }
4137 }
4138 break;
4139
4140 case HCI_ACLDATA_PKT:
4141 if (skb->len == HCI_ACL_HDR_SIZE) {
4142 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4143 scb->expect = __le16_to_cpu(h->dlen);
4144
4145 if (skb_tailroom(skb) < scb->expect) {
4146 kfree_skb(skb);
4147 hdev->reassembly[index] = NULL;
4148 return -ENOMEM;
4149 }
4150 }
4151 break;
4152
4153 case HCI_SCODATA_PKT:
4154 if (skb->len == HCI_SCO_HDR_SIZE) {
4155 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4156 scb->expect = h->dlen;
4157
4158 if (skb_tailroom(skb) < scb->expect) {
4159 kfree_skb(skb);
4160 hdev->reassembly[index] = NULL;
4161 return -ENOMEM;
4162 }
4163 }
4164 break;
4165 }
4166
4167 if (scb->expect == 0) {
4168 /* Complete frame */
4169
4170 bt_cb(skb)->pkt_type = type;
e1a26170 4171 hci_recv_frame(hdev, skb);
33e882a5
SS
4172
4173 hdev->reassembly[index] = NULL;
4174 return remain;
4175 }
4176 }
4177
4178 return remain;
4179}
4180
ef222013
MH
4181int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4182{
f39a3c06
SS
4183 int rem = 0;
4184
ef222013
MH
4185 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4186 return -EILSEQ;
4187
da5f6c37 4188 while (count) {
1e429f38 4189 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
4190 if (rem < 0)
4191 return rem;
ef222013 4192
f39a3c06
SS
4193 data += (count - rem);
4194 count = rem;
f81c6224 4195 }
ef222013 4196
f39a3c06 4197 return rem;
ef222013
MH
4198}
4199EXPORT_SYMBOL(hci_recv_fragment);
4200
99811510
SS
4201#define STREAM_REASSEMBLY 0
4202
4203int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4204{
4205 int type;
4206 int rem = 0;
4207
da5f6c37 4208 while (count) {
99811510
SS
4209 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4210
4211 if (!skb) {
4212 struct { char type; } *pkt;
4213
4214 /* Start of the frame */
4215 pkt = data;
4216 type = pkt->type;
4217
4218 data++;
4219 count--;
4220 } else
4221 type = bt_cb(skb)->pkt_type;
4222
1e429f38 4223 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4224 STREAM_REASSEMBLY);
99811510
SS
4225 if (rem < 0)
4226 return rem;
4227
4228 data += (count - rem);
4229 count = rem;
f81c6224 4230 }
99811510
SS
4231
4232 return rem;
4233}
4234EXPORT_SYMBOL(hci_recv_stream_fragment);
4235
1da177e4
LT
4236/* ---- Interface to upper protocols ---- */
4237
1da177e4
LT
4238int hci_register_cb(struct hci_cb *cb)
4239{
4240 BT_DBG("%p name %s", cb, cb->name);
4241
f20d09d5 4242 write_lock(&hci_cb_list_lock);
1da177e4 4243 list_add(&cb->list, &hci_cb_list);
f20d09d5 4244 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4245
4246 return 0;
4247}
4248EXPORT_SYMBOL(hci_register_cb);
4249
4250int hci_unregister_cb(struct hci_cb *cb)
4251{
4252 BT_DBG("%p name %s", cb, cb->name);
4253
f20d09d5 4254 write_lock(&hci_cb_list_lock);
1da177e4 4255 list_del(&cb->list);
f20d09d5 4256 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4257
4258 return 0;
4259}
4260EXPORT_SYMBOL(hci_unregister_cb);
4261
51086991 4262static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4263{
0d48d939 4264 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4265
cd82e61c
MH
4266 /* Time stamp */
4267 __net_timestamp(skb);
1da177e4 4268
cd82e61c
MH
4269 /* Send copy to monitor */
4270 hci_send_to_monitor(hdev, skb);
4271
4272 if (atomic_read(&hdev->promisc)) {
4273 /* Send copy to the sockets */
470fe1b5 4274 hci_send_to_sock(hdev, skb);
1da177e4
LT
4275 }
4276
4277 /* Get rid of skb owner, prior to sending to the driver. */
4278 skb_orphan(skb);
4279
7bd8f09f 4280 if (hdev->send(hdev, skb) < 0)
51086991 4281 BT_ERR("%s sending frame failed", hdev->name);
1da177e4
LT
4282}
4283
3119ae95
JH
4284void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4285{
4286 skb_queue_head_init(&req->cmd_q);
4287 req->hdev = hdev;
5d73e034 4288 req->err = 0;
3119ae95
JH
4289}
4290
4291int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4292{
4293 struct hci_dev *hdev = req->hdev;
4294 struct sk_buff *skb;
4295 unsigned long flags;
4296
4297 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4298
5d73e034
AG
4299 /* If an error occured during request building, remove all HCI
4300 * commands queued on the HCI request queue.
4301 */
4302 if (req->err) {
4303 skb_queue_purge(&req->cmd_q);
4304 return req->err;
4305 }
4306
3119ae95
JH
4307 /* Do not allow empty requests */
4308 if (skb_queue_empty(&req->cmd_q))
382b0c39 4309 return -ENODATA;
3119ae95
JH
4310
4311 skb = skb_peek_tail(&req->cmd_q);
4312 bt_cb(skb)->req.complete = complete;
4313
4314 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4315 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4316 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4317
4318 queue_work(hdev->workqueue, &hdev->cmd_work);
4319
4320 return 0;
4321}
4322
/* Allocate and fill an skb carrying one HCI command packet: command
 * header (opcode in little endian, parameter length) followed by @plen
 * bytes copied from @param. Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	/* GFP_ATOMIC: may be called from non-sleepable context */
	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}
4347
/* Send a single stand-alone HCI command.
 *
 * The command is queued on hdev->cmd_q marked as the start of its own
 * (single-command) request, and the command work is scheduled to
 * transmit it. Returns 0 on success or -ENOMEM if the command skb
 * could not be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 4372
/* Queue a command to an asynchronous HCI request.
 *
 * @event, when non-zero, names the HCI event expected to complete this
 * command instead of the usual Command Complete/Status. Errors are
 * sticky: on allocation failure req->err is set and later additions
 * (and hci_req_run()) see it; nothing is queued after that.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first queued command marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
4403
/* Queue a command to an asynchronous HCI request, completed by the
 * normal Command Complete/Status event (event == 0).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
4409
/* Get the parameter data of the previously sent command.
 *
 * Returns a pointer just past the command header of hdev->sent_cmd,
 * or NULL when no command is outstanding or its opcode does not match
 * @opcode. The returned pointer aliases sent_cmd's buffer and is only
 * valid until the next command is sent.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
4427
/* Send ACL data */
/* Prepend an ACL data header to @skb.
 *
 * The payload length is captured BEFORE skb_push() grows the skb, so
 * hdr->dlen covers only the data. @handle and the packet-boundary
 * @flags are packed into the 16-bit handle field per the HCI spec.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
4440
/* Add ACL headers to @skb (and any fragments hanging off its
 * frag_list) and append everything to @queue.
 *
 * BR/EDR controllers use the connection handle; AMP controllers use
 * the logical channel handle for the first fragment. Continuation
 * fragments are flagged ACL_CONT and always use the connection
 * handle. All fragments are queued atomically under the queue lock so
 * no other packet can interleave within one ACL PDU.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Trim skb accounting to the linear head; fragments are handled
	 * individually via frag_list below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Subsequent fragments are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
4498
/* Queue an ACL packet on the channel's data queue and schedule the TX
 * work to push it out.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
4509
/* Send SCO data */
/* Prepend a SCO header (handle + length), queue the packet on the
 * connection's data queue, and schedule the TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Capture payload length before skb_push() grows the skb */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
4530
4531/* ---- HCI TX task (outgoing data) ---- */
4532
/* HCI Connection scheduler */
/* Pick the connection of @type with pending data that has the fewest
 * outstanding (unacked) packets, and compute its fair share of the
 * controller's free buffer quota.
 *
 * *quote is set to cnt/num (at least 1 when a connection is chosen,
 * 0 otherwise), where cnt is the free packet count for the link type
 * and num the number of eligible connections. Runs under RCU; see
 * comment below about locking.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Shared ACL buffers when the controller reports
			 * no dedicated LE buffers (le_mtu == 0).
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4593
/* TX timeout handler: forcibly disconnect every connection of @type
 * that still has unacked packets, assuming the controller has stalled.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
4614
/* Channel-level scheduler: pick the channel of link @type whose head
 * packet has the highest priority, breaking ties by the owning
 * connection's lowest unacked count.
 *
 * Only the queue-head priority of each channel is considered; a
 * higher-priority head resets the candidate set (num/min). *quote is
 * set to the channel's fair share of the free buffer count, at least 1.
 * Returns NULL (quote untouched) when no channel has pending data.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts selection */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* Fall back to ACL buffers when no dedicated LE buffers */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4696
/* Anti-starvation pass after a scheduling round.
 *
 * For every channel of link @type that sent nothing this round
 * (chan->sent == 0) but still has queued data, promote its head
 * packet's priority to HCI_PRIO_MAX - 1 so it wins a future round.
 * Channels that did send get their per-round counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: reset counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4746
b71d385a
AE
4747static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4748{
4749 /* Calculate count of blocks used by this packet */
4750 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4751}
4752
/* If the controller has no free ACL buffers (@cnt == 0) and nothing
 * has been acked since the ACL TX timeout, treat the link as stalled
 * and kill affected connections. Skipped entirely in HCI_RAW mode.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 4763
/* Packet-based ACL scheduler: drain channels chosen by
 * hci_chan_sent() while free ACL buffers remain, each channel limited
 * to its quota per round. Stops servicing a channel early if a
 * lower-priority packet reaches its queue head. If anything was sent,
 * run the priority recalculation to prevent starvation.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4801
/* Block-based ACL scheduler (flow control counted in data blocks
 * rather than packets, as used by AMP controllers).
 *
 * Like hci_sched_acl_pkt() but each packet consumes __get_blocks()
 * worth of the budget; a packet larger than the remaining block count
 * aborts the round. AMP controllers schedule AMP_LINK, BR/EDR
 * controllers ACL_LINK.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* Packet does not fit in the remaining budget */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4855
/* Dispatch ACL scheduling to the packet- or block-based variant
 * according to the controller's flow control mode, after filtering out
 * controllers with no matching links.
 */
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
4878
1da177e4 4879/* Schedule SCO */
6039aa73 4880static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4881{
4882 struct hci_conn *conn;
4883 struct sk_buff *skb;
4884 int quote;
4885
4886 BT_DBG("%s", hdev->name);
4887
52087a79
LAD
4888 if (!hci_conn_num(hdev, SCO_LINK))
4889 return;
4890
1da177e4
LT
4891 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4892 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4893 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4894 hci_send_frame(hdev, skb);
1da177e4
LT
4895
4896 conn->sent++;
4897 if (conn->sent == ~0)
4898 conn->sent = 0;
4899 }
4900 }
4901}
4902
/* eSCO scheduler: identical policy to hci_sched_sco() but for
 * ESCO_LINK connections (shares the SCO buffer count).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4926
/* LE scheduler, channel-based like the ACL packet scheduler.
 *
 * Uses dedicated LE buffers when the controller reports them
 * (le_pkts), otherwise borrows the ACL buffer count, and writes the
 * remaining budget back to the matching counter at the end. Applies
 * its own stall detection (45 s since last LE TX with no free
 * buffers). Runs the priority recalculation if anything was sent.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the unused budget to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4977
/* TX work handler: run every per-link-type scheduler (unless the
 * device is owned by a user channel, which bypasses scheduling), then
 * flush any queued raw packets straight to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4998
25985edc 4999/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5000
5001/* ACL data packet */
/* Handle an incoming ACL data packet: strip the ACL header, resolve
 * the connection from the handle, and pass the payload up to L2CAP
 * (which consumes the skb). Packets for unknown handles are logged
 * and dropped.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* Split the 16-bit field into handle and packet-boundary flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
5036
/* SCO data packet */
/* Handle an incoming SCO data packet: strip the SCO header, resolve
 * the connection, and hand the payload to the SCO layer (which
 * consumes the skb). Unknown handles are logged and dropped.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
5067
9238f36a
JH
5068static bool hci_req_is_complete(struct hci_dev *hdev)
5069{
5070 struct sk_buff *skb;
5071
5072 skb = skb_peek(&hdev->cmd_q);
5073 if (!skb)
5074 return true;
5075
5076 return bt_cb(skb)->req.start;
5077}
5078
/* Re-queue a clone of the last sent command at the head of the command
 * queue. Used when a controller spontaneously resets during init and
 * the outstanding command would otherwise never complete. HCI_Reset
 * itself is never resent. Allocation failure is silently ignored
 * (best effort).
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
5100
/* Process completion of the command identified by @opcode/@status and,
 * when it ends a request, locate and invoke the request's complete
 * callback exactly once.
 *
 * If @opcode does not match the outstanding command, this is a
 * spurious event; the only special case handled is the spontaneous
 * reset some CSR controllers emit during init (handled by resending
 * the last command). On failure (@status != 0) all remaining commands
 * of the request are purged from the command queue up to the start of
 * the next request.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			/* Start of the next request: put it back and stop */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5166
/* RX work handler: drain hdev->rx_q and dispatch each frame.
 *
 * Every frame is mirrored to the monitor channel and, in promiscuous
 * mode, to the raw sockets. Frames are then dropped entirely in raw
 * or user-channel mode (userspace owns the device), data packets are
 * dropped while HCI_INIT is set, and the rest are dispatched by
 * packet type to the event/ACL/SCO handlers.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
5222
/* Command work handler: transmit the next queued HCI command if the
 * controller has a free command slot (cmd_cnt > 0).
 *
 * The command is cloned into hdev->sent_cmd so its data remains
 * available for hci_sent_cmd_data() after transmission, and the
 * command timeout timer is (re)armed — except during reset, when it
 * is cancelled. If the clone fails the command is requeued and the
 * work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Release the previously sent command, if any */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
/* Append an LE Set Scan Enable (disable) command to @req. */
void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
a4790dbd 5263
/* Append the commands to start LE passive scanning to @req: scan
 * parameters (passive type, configured interval/window, resolved own
 * address type) followed by scan enable with duplicate filtering.
 * If a suitable own address cannot be set up, nothing is queued.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;

	/* Set require_privacy to true to avoid identification from
	 * unknown peer devices. Since this is passive scanning, no
	 * SCAN_REQ using the local identity should be sent. Mandating
	 * privacy is just an extra precaution.
	 */
	if (hci_update_random_address(req, true, &own_addr_type))
		return;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}
5293
a4790dbd
AG
5294static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5295{
5296 if (status)
5297 BT_DBG("HCI request failed to update background scanning: "
5298 "status 0x%2.2x", status);
5299}
5300
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connection we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns)) {
		/* If there is no pending LE connections, we should stop
		 * the background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}