/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

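/* Toggle Device Under Test mode. Writing "1" issues the standard
 * HCI_OP_ENABLE_DUT_MODE command; there is no command to leave DUT
 * mode, so writing "0" resets the controller instead.
 */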
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dev_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;
        struct sk_buff *skb;
        __u8 mode;
        int err;

        if (val != 0 && val != 1)
                return -EINVAL;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        hci_req_lock(hdev);
        mode = val;
        skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
                             &mode, HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_lock(hdev);
        hdev->ssp_debug_mode = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->ssp_debug_mode;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
                        ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

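/* The force_sc flag takes effect at the next controller init (see
 * hci_init4_req below), hence it can only be toggled while the
 * controller is powered off.
 */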
static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dev_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

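/* The idle timeout is expressed in milliseconds: 0 disables it,
 * otherwise anything from 500 ms up to one hour is accepted.
 */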
static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

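/* Sniff intervals are expressed in baseband slots (0.625 ms each)
 * and must be even, with min never exceeding max.
 */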
static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

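/* LE connection intervals are in units of 1.25 ms; the specification
 * allows 0x0006 (7.5 ms) through 0x0c80 (4 s), and min must never
 * exceed max.
 */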
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

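/* The advertising channel map is a bitmask: bit 0 enables channel 37,
 * bit 1 channel 38 and bit 2 channel 39, so valid values run from
 * 0x01 to 0x07 with at least one channel enabled.
 */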
static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
                            size_t count, loff_t *position)
{
        struct hci_dev *hdev = fp->private_data;
        bool enable;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));

        if (copy_from_user(buf, user_buffer, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';

        if (strtobool(buf, &enable) < 0)
                return -EINVAL;

        if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

        return count;
}

static const struct file_operations lowpan_debugfs_fops = {
        .open           = simple_open,
        .read           = lowpan_read,
        .write          = lowpan_write,
        .llseek         = default_llseek,
};

static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
        struct hci_dev *hdev = sf->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
        return single_open(file, le_auto_conn_show, inode->i_private);
}

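/* Writes accept one command per write(2) call:
 *
 *      "add <bdaddr> <addr_type> [auto_connect]"
 *      "del <bdaddr> <addr_type>"
 *      "clr"
 *
 * with <bdaddr> in the usual colon-separated hex notation.
 */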
static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
                                  size_t count, loff_t *offset)
{
        struct seq_file *sf = file->private_data;
        struct hci_dev *hdev = sf->private;
        u8 auto_connect = 0;
        bdaddr_t addr;
        u8 addr_type;
        char *buf;
        int err = 0;
        int n;

        /* Don't allow partial write */
        if (*offset != 0)
                return -EINVAL;

        if (count < 3)
                return -EINVAL;

        buf = memdup_user(data, count);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        if (memcmp(buf, "add", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
                           &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
                           &addr.b[1], &addr.b[0], &addr_type,
                           &auto_connect);

                if (n < 7) {
                        err = -EINVAL;
                        goto done;
                }

                hci_dev_lock(hdev);
                err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
                                          hdev->le_conn_min_interval,
                                          hdev->le_conn_max_interval);
                hci_dev_unlock(hdev);

                if (err)
                        goto done;
        } else if (memcmp(buf, "del", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
                           &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
                           &addr.b[1], &addr.b[0], &addr_type);

                if (n < 7) {
                        err = -EINVAL;
                        goto done;
                }

                hci_dev_lock(hdev);
                hci_conn_params_del(hdev, &addr, addr_type);
                hci_dev_unlock(hdev);
        } else if (memcmp(buf, "clr", 3) == 0) {
                hci_dev_lock(hdev);
                hci_conn_params_clear(hdev);
                hci_pend_le_conns_clear(hdev);
                hci_update_background_scan(hdev);
                hci_dev_unlock(hdev);
        } else {
                err = -EINVAL;
        }

done:
        kfree(buf);

        if (err)
                return err;
        else
                return count;
}

static const struct file_operations le_auto_conn_fops = {
        .open           = le_auto_conn_open,
        .read           = seq_read,
        .write          = le_auto_conn_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

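/* Synchronous request machinery: a caller builds a request, queues it
 * and then sleeps on hdev->req_wait_q until hci_req_sync_complete() or
 * hci_req_cancel() flips hdev->req_status.
 */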
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

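/* Take the last received event out of hdev->recv_evt and check that it
 * is the event the caller asked for, or, failing that, a Command
 * Complete for the given opcode. On success the skb is returned with
 * the event headers already pulled; otherwise an ERR_PTR is returned.
 */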
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

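/* Convenience wrapper for the common case of waiting for the Command
 * Complete event. A typical caller (see dut_mode_write above) does:
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      err = -bt_to_errno(skb->data[0]);
 *      kfree_skb(skb);
 */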
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

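/* Stage one of controller init: reset the device (unless the driver
 * set HCI_QUIRK_RESET_ON_CLOSE) and read the basic identity and
 * feature information for the transport type.
 */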
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

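/* Unmask only the events the controller can actually generate, based
 * on its LMP features; everything else stays filtered out.
 */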
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

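/* Stage two of controller init: transport specific setup driven by
 * the features discovered during stage one.
 */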
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * also be available. However some controllers list the
                 * max_page as 0 as long as SSP has not been enabled. To
                 * achieve proper debugging output, force the minimum
                 * max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev))
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

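/* Stage three of controller init: commands that depend on the
 * supported-commands bitmask and the extended feature pages read
 * during stage two.
 */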
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
1658         if (hdev->commands[6] & 0x80 &&
1659             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1660                 struct hci_cp_delete_stored_link_key cp;
1661
1662                 bacpy(&cp.bdaddr, BDADDR_ANY);
1663                 cp.delete_all = 0x01;
1664                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1665                             sizeof(cp), &cp);
1666         }
1667
1668         if (hdev->commands[5] & 0x10)
1669                 hci_setup_link_policy(req);
1670
1671         if (lmp_le_capable(hdev))
1672                 hci_set_le_support(req);
1673
1674         /* Read features beyond page 1 if available */
1675         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1676                 struct hci_cp_read_local_ext_features cp;
1677
1678                 cp.page = p;
1679                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1680                             sizeof(cp), &cp);
1681         }
1682 }
1683
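/* Fourth stage of the controller init: event mask page 2,
 * synchronization train parameters and Secure Connections support,
 * each guarded by the corresponding controller capability.
 */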
1684 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1685 {
1686         struct hci_dev *hdev = req->hdev;
1687
1688         /* Set event mask page 2 if the HCI command for it is supported */
1689         if (hdev->commands[22] & 0x04)
1690                 hci_set_event_mask_page_2(req);
1691
1692         /* Check for Synchronization Train support */
1693         if (lmp_sync_train_capable(hdev))
1694                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1695
1696         /* Enable Secure Connections if supported and configured */
1697         if ((lmp_sc_capable(hdev) ||
1698              test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
1699             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1700                 u8 support = 0x01;
1701                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1702                             sizeof(support), &support);
1703         }
1704 }
1705
1706 static int __hci_init(struct hci_dev *hdev)
1707 {
1708         int err;
1709
1710         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1711         if (err < 0)
1712                 return err;
1713
1714         /* The Device Under Test (DUT) mode is special and available for
1715          * all controller types. So just create it early on.
1716          */
1717         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1718                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1719                                     &dut_mode_fops);
1720         }
1721
1722         /* The HCI_BREDR device type covers LE-only, BR/EDR-only and
1723          * dual-mode BR/EDR/LE controllers. AMP controllers only need
1724          * the first stage init.
1725          */
1726         if (hdev->dev_type != HCI_BREDR)
1727                 return 0;
1728
1729         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1730         if (err < 0)
1731                 return err;
1732
1733         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1734         if (err < 0)
1735                 return err;
1736
1737         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1738         if (err < 0)
1739                 return err;
1740
1741         /* Only create debugfs entries during the initial setup
1742          * phase and not every time the controller gets powered on.
1743          */
1744         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1745                 return 0;
1746
1747         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1748                             &features_fops);
1749         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1750                            &hdev->manufacturer);
1751         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1752         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1753         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1754                             &blacklist_fops);
1755         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1756
1757         if (lmp_bredr_capable(hdev)) {
1758                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1759                                     hdev, &inquiry_cache_fops);
1760                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1761                                     hdev, &link_keys_fops);
1762                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1763                                     hdev, &dev_class_fops);
1764                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1765                                     hdev, &voice_setting_fops);
1766         }
1767
1768         if (lmp_ssp_capable(hdev)) {
1769                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1770                                     hdev, &auto_accept_delay_fops);
1771                 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1772                                     hdev, &ssp_debug_mode_fops);
1773                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1774                                     hdev, &force_sc_support_fops);
1775                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1776                                     hdev, &sc_only_mode_fops);
1777         }
1778
1779         if (lmp_sniff_capable(hdev)) {
1780                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1781                                     hdev, &idle_timeout_fops);
1782                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1783                                     hdev, &sniff_min_interval_fops);
1784                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1785                                     hdev, &sniff_max_interval_fops);
1786         }
1787
1788         if (lmp_le_capable(hdev)) {
1789                 debugfs_create_file("identity", 0400, hdev->debugfs,
1790                                     hdev, &identity_fops);
1791                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1792                                     hdev, &rpa_timeout_fops);
1793                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1794                                     hdev, &random_address_fops);
1795                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1796                                     hdev, &static_address_fops);
1797
1798                 /* For controllers with a public address, provide a debug
1799                  * option to force the usage of the configured static
1800                  * address. By default the public address is used.
1801                  */
1802                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1803                         debugfs_create_file("force_static_address", 0644,
1804                                             hdev->debugfs, hdev,
1805                                             &force_static_address_fops);
1806
1807                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1808                                   &hdev->le_white_list_size);
1809                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1810                                     &white_list_fops);
1811                 debugfs_create_file("identity_resolving_keys", 0400,
1812                                     hdev->debugfs, hdev,
1813                                     &identity_resolving_keys_fops);
1814                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1815                                     hdev, &long_term_keys_fops);
1816                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1817                                     hdev, &conn_min_interval_fops);
1818                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1819                                     hdev, &conn_max_interval_fops);
1820                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1821                                     hdev, &adv_channel_map_fops);
1822                 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1823                                     &lowpan_debugfs_fops);
1824                 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1825                                     &le_auto_conn_fops);
1826                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1827                                    hdev->debugfs,
1828                                    &hdev->discov_interleaved_timeout);
1829         }
1830
1831         return 0;
1832 }
1833
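/* The small request builders below back the HCISET* ioctls; each one
 * queues a single HCI command carrying the option value passed in
 * via hci_req_sync().
 */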
1834 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1835 {
1836         __u8 scan = opt;
1837
1838         BT_DBG("%s %x", req->hdev->name, scan);
1839
1840         /* Inquiry and Page scans */
1841         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1842 }
1843
1844 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1845 {
1846         __u8 auth = opt;
1847
1848         BT_DBG("%s %x", req->hdev->name, auth);
1849
1850         /* Authentication */
1851         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1852 }
1853
1854 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1855 {
1856         __u8 encrypt = opt;
1857
1858         BT_DBG("%s %x", req->hdev->name, encrypt);
1859
1860         /* Encryption */
1861         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1862 }
1863
1864 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1865 {
1866         __le16 policy = cpu_to_le16(opt);
1867
1868         BT_DBG("%s %x", req->hdev->name, policy);
1869
1870         /* Default link policy */
1871         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1872 }
1873
1874 /* Get HCI device by index.
1875  * Device is held on return. */
1876 struct hci_dev *hci_dev_get(int index)
1877 {
1878         struct hci_dev *hdev = NULL, *d;
1879
1880         BT_DBG("%d", index);
1881
1882         if (index < 0)
1883                 return NULL;
1884
1885         read_lock(&hci_dev_list_lock);
1886         list_for_each_entry(d, &hci_dev_list, list) {
1887                 if (d->id == index) {
1888                         hdev = hci_dev_hold(d);
1889                         break;
1890                 }
1891         }
1892         read_unlock(&hci_dev_list_lock);
1893         return hdev;
1894 }
1895
1896 /* ---- Inquiry support ---- */
1897
1898 bool hci_discovery_active(struct hci_dev *hdev)
1899 {
1900         struct discovery_state *discov = &hdev->discovery;
1901
1902         switch (discov->state) {
1903         case DISCOVERY_FINDING:
1904         case DISCOVERY_RESOLVING:
1905                 return true;
1906
1907         default:
1908                 return false;
1909         }
1910 }
1911
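/* Drive the discovery state machine and let the management interface
 * know when discovery effectively starts or stops. Entering
 * DISCOVERY_STOPPED also re-evaluates background scanning.
 */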
1912 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1913 {
1914         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1915
1916         if (hdev->discovery.state == state)
1917                 return;
1918
1919         switch (state) {
1920         case DISCOVERY_STOPPED:
1921                 hci_update_background_scan(hdev);
1922
1923                 if (hdev->discovery.state != DISCOVERY_STARTING)
1924                         mgmt_discovering(hdev, 0);
1925                 break;
1926         case DISCOVERY_STARTING:
1927                 break;
1928         case DISCOVERY_FINDING:
1929                 mgmt_discovering(hdev, 1);
1930                 break;
1931         case DISCOVERY_RESOLVING:
1932                 break;
1933         case DISCOVERY_STOPPING:
1934                 break;
1935         }
1936
1937         hdev->discovery.state = state;
1938 }
1939
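/* Free every entry in the inquiry cache. The unknown and resolve
 * lists only chain entries owned by the all list, so reinitializing
 * them is sufficient.
 */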
1940 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1941 {
1942         struct discovery_state *cache = &hdev->discovery;
1943         struct inquiry_entry *p, *n;
1944
1945         list_for_each_entry_safe(p, n, &cache->all, all) {
1946                 list_del(&p->all);
1947                 kfree(p);
1948         }
1949
1950         INIT_LIST_HEAD(&cache->unknown);
1951         INIT_LIST_HEAD(&cache->resolve);
1952 }
1953
1954 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1955                                                bdaddr_t *bdaddr)
1956 {
1957         struct discovery_state *cache = &hdev->discovery;
1958         struct inquiry_entry *e;
1959
1960         BT_DBG("cache %p, %pMR", cache, bdaddr);
1961
1962         list_for_each_entry(e, &cache->all, all) {
1963                 if (!bacmp(&e->data.bdaddr, bdaddr))
1964                         return e;
1965         }
1966
1967         return NULL;
1968 }
1969
1970 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1971                                                        bdaddr_t *bdaddr)
1972 {
1973         struct discovery_state *cache = &hdev->discovery;
1974         struct inquiry_entry *e;
1975
1976         BT_DBG("cache %p, %pMR", cache, bdaddr);
1977
1978         list_for_each_entry(e, &cache->unknown, list) {
1979                 if (!bacmp(&e->data.bdaddr, bdaddr))
1980                         return e;
1981         }
1982
1983         return NULL;
1984 }
1985
1986 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1987                                                        bdaddr_t *bdaddr,
1988                                                        int state)
1989 {
1990         struct discovery_state *cache = &hdev->discovery;
1991         struct inquiry_entry *e;
1992
1993         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1994
1995         list_for_each_entry(e, &cache->resolve, list) {
1996                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1997                         return e;
1998                 if (!bacmp(&e->data.bdaddr, bdaddr))
1999                         return e;
2000         }
2001
2002         return NULL;
2003 }
2004
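/* Re-insert an entry into the resolve list so that it is ordered by
 * signal strength (ascending absolute RSSI), while entries whose
 * name resolution is already pending stay ahead of it.
 */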
2005 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2006                                       struct inquiry_entry *ie)
2007 {
2008         struct discovery_state *cache = &hdev->discovery;
2009         struct list_head *pos = &cache->resolve;
2010         struct inquiry_entry *p;
2011
2012         list_del(&ie->list);
2013
2014         list_for_each_entry(p, &cache->resolve, list) {
2015                 if (p->name_state != NAME_PENDING &&
2016                     abs(p->data.rssi) >= abs(ie->data.rssi))
2017                         break;
2018                 pos = &p->list;
2019         }
2020
2021         list_add(&ie->list, pos);
2022 }
2023
2024 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2025                               bool name_known, bool *ssp)
2026 {
2027         struct discovery_state *cache = &hdev->discovery;
2028         struct inquiry_entry *ie;
2029
2030         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2031
2032         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2033
2034         *ssp = data->ssp_mode;
2035
2036         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2037         if (ie) {
2038                 if (ie->data.ssp_mode)
2039                         *ssp = true;
2040
2041                 if (ie->name_state == NAME_NEEDED &&
2042                     data->rssi != ie->data.rssi) {
2043                         ie->data.rssi = data->rssi;
2044                         hci_inquiry_cache_update_resolve(hdev, ie);
2045                 }
2046
2047                 goto update;
2048         }
2049
2050         /* Entry not in the cache. Add new one. */
2051         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2052         if (!ie)
2053                 return false;
2054
2055         list_add(&ie->all, &cache->all);
2056
2057         if (name_known) {
2058                 ie->name_state = NAME_KNOWN;
2059         } else {
2060                 ie->name_state = NAME_NOT_KNOWN;
2061                 list_add(&ie->list, &cache->unknown);
2062         }
2063
2064 update:
2065         if (name_known && ie->name_state != NAME_KNOWN &&
2066             ie->name_state != NAME_PENDING) {
2067                 ie->name_state = NAME_KNOWN;
2068                 list_del(&ie->list);
2069         }
2070
2071         memcpy(&ie->data, data, sizeof(*data));
2072         ie->timestamp = jiffies;
2073         cache->timestamp = jiffies;
2074
2075         if (ie->name_state == NAME_NOT_KNOWN)
2076                 return false;
2077
2078         return true;
2079 }
2080
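/* Copy up to num entries from the inquiry cache into buf as
 * struct inquiry_info records and return the number copied. Runs
 * under hdev->lock and therefore must not sleep.
 */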
2081 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2082 {
2083         struct discovery_state *cache = &hdev->discovery;
2084         struct inquiry_info *info = (struct inquiry_info *) buf;
2085         struct inquiry_entry *e;
2086         int copied = 0;
2087
2088         list_for_each_entry(e, &cache->all, all) {
2089                 struct inquiry_data *data = &e->data;
2090
2091                 if (copied >= num)
2092                         break;
2093
2094                 bacpy(&info->bdaddr, &data->bdaddr);
2095                 info->pscan_rep_mode    = data->pscan_rep_mode;
2096                 info->pscan_period_mode = data->pscan_period_mode;
2097                 info->pscan_mode        = data->pscan_mode;
2098                 memcpy(info->dev_class, data->dev_class, 3);
2099                 info->clock_offset      = data->clock_offset;
2100
2101                 info++;
2102                 copied++;
2103         }
2104
2105         BT_DBG("cache %p, copied %d", cache, copied);
2106         return copied;
2107 }
2108
2109 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2110 {
2111         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2112         struct hci_dev *hdev = req->hdev;
2113         struct hci_cp_inquiry cp;
2114
2115         BT_DBG("%s", hdev->name);
2116
2117         if (test_bit(HCI_INQUIRY, &hdev->flags))
2118                 return;
2119
2120         /* Start Inquiry */
2121         memcpy(&cp.lap, &ir->lap, 3);
2122         cp.length  = ir->length;
2123         cp.num_rsp = ir->num_rsp;
2124         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2125 }
2126
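/* wait_on_bit() action used while waiting for HCI_INQUIRY to clear:
 * reschedule and report whether a signal interrupted the wait.
 */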
2127 static int wait_inquiry(void *word)
2128 {
2129         schedule();
2130         return signal_pending(current);
2131 }
2132
2133 int hci_inquiry(void __user *arg)
2134 {
2135         __u8 __user *ptr = arg;
2136         struct hci_inquiry_req ir;
2137         struct hci_dev *hdev;
2138         int err = 0, do_inquiry = 0, max_rsp;
2139         long timeo;
2140         __u8 *buf;
2141
2142         if (copy_from_user(&ir, ptr, sizeof(ir)))
2143                 return -EFAULT;
2144
2145         hdev = hci_dev_get(ir.dev_id);
2146         if (!hdev)
2147                 return -ENODEV;
2148
2149         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2150                 err = -EBUSY;
2151                 goto done;
2152         }
2153
2154         if (hdev->dev_type != HCI_BREDR) {
2155                 err = -EOPNOTSUPP;
2156                 goto done;
2157         }
2158
2159         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2160                 err = -EOPNOTSUPP;
2161                 goto done;
2162         }
2163
2164         hci_dev_lock(hdev);
2165         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2166             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2167                 hci_inquiry_cache_flush(hdev);
2168                 do_inquiry = 1;
2169         }
2170         hci_dev_unlock(hdev);
2171
2172         timeo = ir.length * msecs_to_jiffies(2000);
2173
2174         if (do_inquiry) {
2175                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2176                                    timeo);
2177                 if (err < 0)
2178                         goto done;
2179
2180                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2181                  * cleared). If it is interrupted by a signal, return -EINTR.
2182                  */
2183                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2184                                 TASK_INTERRUPTIBLE))
2185                         return -EINTR;
2186         }
2187
2188         /* For an unlimited number of responses, use a buffer with
2189          * 255 entries.
2190          */
2191         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2192
2193         /* inquiry_cache_dump() can't sleep. Therefore we allocate a
2194          * temporary buffer and then copy it to user space.
2195          */
2196         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2197         if (!buf) {
2198                 err = -ENOMEM;
2199                 goto done;
2200         }
2201
2202         hci_dev_lock(hdev);
2203         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2204         hci_dev_unlock(hdev);
2205
2206         BT_DBG("num_rsp %d", ir.num_rsp);
2207
2208         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2209                 ptr += sizeof(ir);
2210                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2211                                  ir.num_rsp))
2212                         err = -EFAULT;
2213         } else {
2214                 err = -EFAULT;
        }
2215
2216         kfree(buf);
2217
2218 done:
2219         hci_dev_put(hdev);
2220         return err;
2221 }
2222
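/* Bring the controller up: validate that the device may be opened,
 * run the driver's open() and setup() callbacks, perform the staged
 * HCI init unless the device is in raw or user channel mode, and
 * roll everything back on failure.
 */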
2223 static int hci_dev_do_open(struct hci_dev *hdev)
2224 {
2225         int ret = 0;
2226
2227         BT_DBG("%s %p", hdev->name, hdev);
2228
2229         hci_req_lock(hdev);
2230
2231         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2232                 ret = -ENODEV;
2233                 goto done;
2234         }
2235
2236         if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2237                 /* Check for rfkill but allow the HCI setup stage to
2238                  * proceed (which in itself doesn't cause any RF activity).
2239                  */
2240                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2241                         ret = -ERFKILL;
2242                         goto done;
2243                 }
2244
2245                 /* Check for valid public address or a configured static
2246                  * random address, but let the HCI setup proceed to
2247                  * be able to determine if there is a public address
2248                  * or not.
2249                  *
2250                  * In case of user channel usage, it is not important
2251                  * if a public address or static random address is
2252                  * available.
2253                  *
2254                  * This check is only valid for BR/EDR controllers
2255                  * since AMP controllers do not have an address.
2256                  */
2257                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2258                     hdev->dev_type == HCI_BREDR &&
2259                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2260                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2261                         ret = -EADDRNOTAVAIL;
2262                         goto done;
2263                 }
2264         }
2265
2266         if (test_bit(HCI_UP, &hdev->flags)) {
2267                 ret = -EALREADY;
2268                 goto done;
2269         }
2270
2271         if (hdev->open(hdev)) {
2272                 ret = -EIO;
2273                 goto done;
2274         }
2275
2276         atomic_set(&hdev->cmd_cnt, 1);
2277         set_bit(HCI_INIT, &hdev->flags);
2278
2279         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2280                 ret = hdev->setup(hdev);
2281
2282         if (!ret) {
2283                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2284                         set_bit(HCI_RAW, &hdev->flags);
2285
2286                 if (!test_bit(HCI_RAW, &hdev->flags) &&
2287                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2288                         ret = __hci_init(hdev);
2289         }
2290
2291         clear_bit(HCI_INIT, &hdev->flags);
2292
2293         if (!ret) {
2294                 hci_dev_hold(hdev);
2295                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2296                 set_bit(HCI_UP, &hdev->flags);
2297                 hci_notify(hdev, HCI_DEV_UP);
2298                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2299                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2300                     hdev->dev_type == HCI_BREDR) {
2301                         hci_dev_lock(hdev);
2302                         mgmt_powered(hdev, 1);
2303                         hci_dev_unlock(hdev);
2304                 }
2305         } else {
2306                 /* Init failed, cleanup */
2307                 flush_work(&hdev->tx_work);
2308                 flush_work(&hdev->cmd_work);
2309                 flush_work(&hdev->rx_work);
2310
2311                 skb_queue_purge(&hdev->cmd_q);
2312                 skb_queue_purge(&hdev->rx_q);
2313
2314                 if (hdev->flush)
2315                         hdev->flush(hdev);
2316
2317                 if (hdev->sent_cmd) {
2318                         kfree_skb(hdev->sent_cmd);
2319                         hdev->sent_cmd = NULL;
2320                 }
2321
2322                 hdev->close(hdev);
2323                 hdev->flags = 0;
2324         }
2325
2326 done:
2327         hci_req_unlock(hdev);
2328         return ret;
2329 }
2330
2331 /* ---- HCI ioctl helpers ---- */
2332
2333 int hci_dev_open(__u16 dev)
2334 {
2335         struct hci_dev *hdev;
2336         int err;
2337
2338         hdev = hci_dev_get(dev);
2339         if (!hdev)
2340                 return -ENODEV;
2341
2342         /* We need to ensure that no other power on/off work is pending
2343          * before proceeding to call hci_dev_do_open. This is
2344          * particularly important if the setup procedure has not yet
2345          * completed.
2346          */
2347         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2348                 cancel_delayed_work(&hdev->power_off);
2349
2350         /* After this call it is guaranteed that the setup procedure
2351          * has finished. This means that error conditions like RFKILL
2352          * or a missing valid public or static random address now apply.
2353          */
2354         flush_workqueue(hdev->req_workqueue);
2355
2356         err = hci_dev_do_open(hdev);
2357
2358         hci_dev_put(hdev);
2359
2360         return err;
2361 }
2362
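/* Power the controller down: cancel pending work, flush the queues
 * and timers, optionally issue an HCI Reset, and finally call the
 * driver's close() callback. Only persistent flags survive.
 */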
2363 static int hci_dev_do_close(struct hci_dev *hdev)
2364 {
2365         BT_DBG("%s %p", hdev->name, hdev);
2366
2367         cancel_delayed_work(&hdev->power_off);
2368
2369         hci_req_cancel(hdev, ENODEV);
2370         hci_req_lock(hdev);
2371
2372         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2373                 del_timer_sync(&hdev->cmd_timer);
2374                 hci_req_unlock(hdev);
2375                 return 0;
2376         }
2377
2378         /* Flush RX and TX works */
2379         flush_work(&hdev->tx_work);
2380         flush_work(&hdev->rx_work);
2381
2382         if (hdev->discov_timeout > 0) {
2383                 cancel_delayed_work(&hdev->discov_off);
2384                 hdev->discov_timeout = 0;
2385                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2386                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2387         }
2388
2389         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2390                 cancel_delayed_work(&hdev->service_cache);
2391
2392         cancel_delayed_work_sync(&hdev->le_scan_disable);
2393
2394         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2395                 cancel_delayed_work_sync(&hdev->rpa_expired);
2396
2397         hci_dev_lock(hdev);
2398         hci_inquiry_cache_flush(hdev);
2399         hci_conn_hash_flush(hdev);
2400         hci_pend_le_conns_clear(hdev);
2401         hci_dev_unlock(hdev);
2402
2403         hci_notify(hdev, HCI_DEV_DOWN);
2404
2405         if (hdev->flush)
2406                 hdev->flush(hdev);
2407
2408         /* Reset device */
2409         skb_queue_purge(&hdev->cmd_q);
2410         atomic_set(&hdev->cmd_cnt, 1);
2411         if (!test_bit(HCI_RAW, &hdev->flags) &&
2412             !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2413             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2414                 set_bit(HCI_INIT, &hdev->flags);
2415                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2416                 clear_bit(HCI_INIT, &hdev->flags);
2417         }
2418
2419         /* flush cmd  work */
2420         flush_work(&hdev->cmd_work);
2421
2422         /* Drop queues */
2423         skb_queue_purge(&hdev->rx_q);
2424         skb_queue_purge(&hdev->cmd_q);
2425         skb_queue_purge(&hdev->raw_q);
2426
2427         /* Drop last sent command */
2428         if (hdev->sent_cmd) {
2429                 del_timer_sync(&hdev->cmd_timer);
2430                 kfree_skb(hdev->sent_cmd);
2431                 hdev->sent_cmd = NULL;
2432         }
2433
2434         kfree_skb(hdev->recv_evt);
2435         hdev->recv_evt = NULL;
2436
2437         /* After this point our queues are empty and no tasks
2438          * are scheduled.
         */
2439         hdev->close(hdev);
2440
2441         /* Clear flags */
2442         hdev->flags = 0;
2443         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2444
2445         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2446                 if (hdev->dev_type == HCI_BREDR) {
2447                         hci_dev_lock(hdev);
2448                         mgmt_powered(hdev, 0);
2449                         hci_dev_unlock(hdev);
2450                 }
2451         }
2452
2453         /* Controller radio is available but is currently powered down */
2454         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2455
2456         memset(hdev->eir, 0, sizeof(hdev->eir));
2457         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2458         bacpy(&hdev->random_addr, BDADDR_ANY);
2459
2460         hci_req_unlock(hdev);
2461
2462         hci_dev_put(hdev);
2463         return 0;
2464 }
2465
2466 int hci_dev_close(__u16 dev)
2467 {
2468         struct hci_dev *hdev;
2469         int err;
2470
2471         hdev = hci_dev_get(dev);
2472         if (!hdev)
2473                 return -ENODEV;
2474
2475         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2476                 err = -EBUSY;
2477                 goto done;
2478         }
2479
2480         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2481                 cancel_delayed_work(&hdev->power_off);
2482
2483         err = hci_dev_do_close(hdev);
2484
2485 done:
2486         hci_dev_put(hdev);
2487         return err;
2488 }
2489
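/* HCIDEVRESET ioctl backend: drop all queued packets, flush the
 * inquiry cache and connection hash, and issue an HCI Reset unless
 * the device is in raw mode.
 */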
2490 int hci_dev_reset(__u16 dev)
2491 {
2492         struct hci_dev *hdev;
2493         int ret = 0;
2494
2495         hdev = hci_dev_get(dev);
2496         if (!hdev)
2497                 return -ENODEV;
2498
2499         hci_req_lock(hdev);
2500
2501         if (!test_bit(HCI_UP, &hdev->flags)) {
2502                 ret = -ENETDOWN;
2503                 goto done;
2504         }
2505
2506         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2507                 ret = -EBUSY;
2508                 goto done;
2509         }
2510
2511         /* Drop queues */
2512         skb_queue_purge(&hdev->rx_q);
2513         skb_queue_purge(&hdev->cmd_q);
2514
2515         hci_dev_lock(hdev);
2516         hci_inquiry_cache_flush(hdev);
2517         hci_conn_hash_flush(hdev);
2518         hci_dev_unlock(hdev);
2519
2520         if (hdev->flush)
2521                 hdev->flush(hdev);
2522
2523         atomic_set(&hdev->cmd_cnt, 1);
2524         hdev->acl_cnt = 0;
        hdev->sco_cnt = 0;
        hdev->le_cnt = 0;
2525
2526         if (!test_bit(HCI_RAW, &hdev->flags))
2527                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2528
2529 done:
2530         hci_req_unlock(hdev);
2531         hci_dev_put(hdev);
2532         return ret;
2533 }
2534
2535 int hci_dev_reset_stat(__u16 dev)
2536 {
2537         struct hci_dev *hdev;
2538         int ret = 0;
2539
2540         hdev = hci_dev_get(dev);
2541         if (!hdev)
2542                 return -ENODEV;
2543
2544         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2545                 ret = -EBUSY;
2546                 goto done;
2547         }
2548
2549         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2550
2551 done:
2552         hci_dev_put(hdev);
2553         return ret;
2554 }
2555
2556 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2557 {
2558         struct hci_dev *hdev;
2559         struct hci_dev_req dr;
2560         int err = 0;
2561
2562         if (copy_from_user(&dr, arg, sizeof(dr)))
2563                 return -EFAULT;
2564
2565         hdev = hci_dev_get(dr.dev_id);
2566         if (!hdev)
2567                 return -ENODEV;
2568
2569         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2570                 err = -EBUSY;
2571                 goto done;
2572         }
2573
2574         if (hdev->dev_type != HCI_BREDR) {
2575                 err = -EOPNOTSUPP;
2576                 goto done;
2577         }
2578
2579         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2580                 err = -EOPNOTSUPP;
2581                 goto done;
2582         }
2583
2584         switch (cmd) {
2585         case HCISETAUTH:
2586                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2587                                    HCI_INIT_TIMEOUT);
2588                 break;
2589
2590         case HCISETENCRYPT:
2591                 if (!lmp_encrypt_capable(hdev)) {
2592                         err = -EOPNOTSUPP;
2593                         break;
2594                 }
2595
2596                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2597                         /* Auth must be enabled first */
2598                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2599                                            HCI_INIT_TIMEOUT);
2600                         if (err)
2601                                 break;
2602                 }
2603
2604                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2605                                    HCI_INIT_TIMEOUT);
2606                 break;
2607
2608         case HCISETSCAN:
2609                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2610                                    HCI_INIT_TIMEOUT);
2611                 break;
2612
2613         case HCISETLINKPOL:
2614                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2615                                    HCI_INIT_TIMEOUT);
2616                 break;
2617
2618         case HCISETLINKMODE:
2619                 hdev->link_mode = ((__u16) dr.dev_opt) &
2620                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2621                 break;
2622
2623         case HCISETPTYPE:
2624                 hdev->pkt_type = (__u16) dr.dev_opt;
2625                 break;
2626
2627         case HCISETACLMTU:
2628                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2629                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2630                 break;
2631
2632         case HCISETSCOMTU:
2633                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2634                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2635                 break;
2636
2637         default:
2638                 err = -EINVAL;
2639                 break;
2640         }
2641
2642 done:
2643         hci_dev_put(hdev);
2644         return err;
2645 }
2646
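/* HCIGETDEVLIST ioctl backend: snapshot the id and flags of up to
 * dev_num registered controllers and copy the list back to user
 * space.
 */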
2647 int hci_get_dev_list(void __user *arg)
2648 {
2649         struct hci_dev *hdev;
2650         struct hci_dev_list_req *dl;
2651         struct hci_dev_req *dr;
2652         int n = 0, size, err;
2653         __u16 dev_num;
2654
2655         if (get_user(dev_num, (__u16 __user *) arg))
2656                 return -EFAULT;
2657
2658         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2659                 return -EINVAL;
2660
2661         size = sizeof(*dl) + dev_num * sizeof(*dr);
2662
2663         dl = kzalloc(size, GFP_KERNEL);
2664         if (!dl)
2665                 return -ENOMEM;
2666
2667         dr = dl->dev_req;
2668
2669         read_lock(&hci_dev_list_lock);
2670         list_for_each_entry(hdev, &hci_dev_list, list) {
2671                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2672                         cancel_delayed_work(&hdev->power_off);
2673
2674                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2675                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2676
2677                 (dr + n)->dev_id  = hdev->id;
2678                 (dr + n)->dev_opt = hdev->flags;
2679
2680                 if (++n >= dev_num)
2681                         break;
2682         }
2683         read_unlock(&hci_dev_list_lock);
2684
2685         dl->dev_num = n;
2686         size = sizeof(*dl) + n * sizeof(*dr);
2687
2688         err = copy_to_user(arg, dl, size);
2689         kfree(dl);
2690
2691         return err ? -EFAULT : 0;
2692 }
2693
2694 int hci_get_dev_info(void __user *arg)
2695 {
2696         struct hci_dev *hdev;
2697         struct hci_dev_info di;
2698         int err = 0;
2699
2700         if (copy_from_user(&di, arg, sizeof(di)))
2701                 return -EFAULT;
2702
2703         hdev = hci_dev_get(di.dev_id);
2704         if (!hdev)
2705                 return -ENODEV;
2706
2707         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2708                 cancel_delayed_work_sync(&hdev->power_off);
2709
2710         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2711                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2712
2713         strcpy(di.name, hdev->name);
2714         di.bdaddr   = hdev->bdaddr;
2715         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2716         di.flags    = hdev->flags;
2717         di.pkt_type = hdev->pkt_type;
2718         if (lmp_bredr_capable(hdev)) {
2719                 di.acl_mtu  = hdev->acl_mtu;
2720                 di.acl_pkts = hdev->acl_pkts;
2721                 di.sco_mtu  = hdev->sco_mtu;
2722                 di.sco_pkts = hdev->sco_pkts;
2723         } else {
2724                 di.acl_mtu  = hdev->le_mtu;
2725                 di.acl_pkts = hdev->le_pkts;
2726                 di.sco_mtu  = 0;
2727                 di.sco_pkts = 0;
2728         }
2729         di.link_policy = hdev->link_policy;
2730         di.link_mode   = hdev->link_mode;
2731
2732         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2733         memcpy(&di.features, &hdev->features, sizeof(di.features));
2734
2735         if (copy_to_user(arg, &di, sizeof(di)))
2736                 err = -EFAULT;
2737
2738         hci_dev_put(hdev);
2739
2740         return err;
2741 }
2742
2743 /* ---- Interface to HCI drivers ---- */
2744
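/* rfkill callback: mirror the block state in the HCI_RFKILLED flag
 * and close the device when it gets blocked outside of the setup
 * phase.
 */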
2745 static int hci_rfkill_set_block(void *data, bool blocked)
2746 {
2747         struct hci_dev *hdev = data;
2748
2749         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2750
2751         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2752                 return -EBUSY;
2753
2754         if (blocked) {
2755                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2756                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2757                         hci_dev_do_close(hdev);
2758         } else {
2759                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2760         }
2761
2762         return 0;
2763 }
2764
2765 static const struct rfkill_ops hci_rfkill_ops = {
2766         .set_block = hci_rfkill_set_block,
2767 };
2768
2769 static void hci_power_on(struct work_struct *work)
2770 {
2771         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2772         int err;
2773
2774         BT_DBG("%s", hdev->name);
2775
2776         err = hci_dev_do_open(hdev);
2777         if (err < 0) {
2778                 mgmt_set_powered_failed(hdev, err);
2779                 return;
2780         }
2781
2782         /* During the HCI setup phase, a few error conditions are
2783          * ignored and they need to be checked now. If they are still
2784          * valid, it is important to turn the device back off.
2785          */
2786         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2787             (hdev->dev_type == HCI_BREDR &&
2788              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2789              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2790                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2791                 hci_dev_do_close(hdev);
2792         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2793                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2794                                    HCI_AUTO_OFF_TIMEOUT);
2795         }
2796
2797         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2798                 mgmt_index_added(hdev);
2799 }
2800
2801 static void hci_power_off(struct work_struct *work)
2802 {
2803         struct hci_dev *hdev = container_of(work, struct hci_dev,
2804                                             power_off.work);
2805
2806         BT_DBG("%s", hdev->name);
2807
2808         hci_dev_do_close(hdev);
2809 }
2810
2811 static void hci_discov_off(struct work_struct *work)
2812 {
2813         struct hci_dev *hdev;
2814
2815         hdev = container_of(work, struct hci_dev, discov_off.work);
2816
2817         BT_DBG("%s", hdev->name);
2818
2819         mgmt_discoverable_timeout(hdev);
2820 }
2821
2822 void hci_uuids_clear(struct hci_dev *hdev)
2823 {
2824         struct bt_uuid *uuid, *tmp;
2825
2826         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2827                 list_del(&uuid->list);
2828                 kfree(uuid);
2829         }
2830 }
2831
2832 void hci_link_keys_clear(struct hci_dev *hdev)
2833 {
2834         struct list_head *p, *n;
2835
2836         list_for_each_safe(p, n, &hdev->link_keys) {
2837                 struct link_key *key;
2838
2839                 key = list_entry(p, struct link_key, list);
2840
2841                 list_del(p);
2842                 kfree(key);
2843         }
2844 }
2845
2846 void hci_smp_ltks_clear(struct hci_dev *hdev)
2847 {
2848         struct smp_ltk *k, *tmp;
2849
2850         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2851                 list_del(&k->list);
2852                 kfree(k);
2853         }
2854 }
2855
2856 void hci_smp_irks_clear(struct hci_dev *hdev)
2857 {
2858         struct smp_irk *k, *tmp;
2859
2860         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2861                 list_del(&k->list);
2862                 kfree(k);
2863         }
2864 }
2865
2866 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2867 {
2868         struct link_key *k;
2869
2870         list_for_each_entry(k, &hdev->link_keys, list)
2871                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2872                         return k;
2873
2874         return NULL;
2875 }
2876
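/* Decide whether a new link key must be stored persistently, based
 * on the key type and on the bonding requirements that both sides
 * declared during pairing.
 */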
2877 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2878                                u8 key_type, u8 old_key_type)
2879 {
2880         /* Legacy key */
2881         if (key_type < 0x03)
2882                 return true;
2883
2884         /* Debug keys are insecure so don't store them persistently */
2885         if (key_type == HCI_LK_DEBUG_COMBINATION)
2886                 return false;
2887
2888         /* Changed combination key and there's no previous one */
2889         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2890                 return false;
2891
2892         /* Security mode 3 case */
2893         if (!conn)
2894                 return true;
2895
2896         /* Neither local nor remote side had no-bonding as a requirement */
2897         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2898                 return true;
2899
2900         /* Local side had dedicated bonding as a requirement */
2901         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2902                 return true;
2903
2904         /* Remote side had dedicated bonding as a requirement */
2905         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2906                 return true;
2907
2908         /* If none of the above criteria match, then don't store the key
2909          * persistently */
2910         return false;
2911 }
2912
2913 static bool ltk_type_master(u8 type)
2914 {
2915         if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2916                 return true;
2917
2918         return false;
2919 }
2920
2921 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2922                              bool master)
2923 {
2924         struct smp_ltk *k;
2925
2926         list_for_each_entry(k, &hdev->long_term_keys, list) {
2927                 if (k->ediv != ediv || k->rand != rand)
2928                         continue;
2929
2930                 if (ltk_type_master(k->type) != master)
2931                         continue;
2932
2933                 return k;
2934         }
2935
2936         return NULL;
2937 }
2938
2939 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2940                                      u8 addr_type, bool master)
2941 {
2942         struct smp_ltk *k;
2943
2944         list_for_each_entry(k, &hdev->long_term_keys, list)
2945                 if (addr_type == k->bdaddr_type &&
2946                     bacmp(bdaddr, &k->bdaddr) == 0 &&
2947                     ltk_type_master(k->type) == master)
2948                         return k;
2949
2950         return NULL;
2951 }
2952
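/* Look up an IRK by resolvable private address: first match against
 * the cached RPAs, then try to cryptographically resolve the address
 * with each stored IRK, caching the RPA on success.
 */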
2953 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2954 {
2955         struct smp_irk *irk;
2956
2957         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2958                 if (!bacmp(&irk->rpa, rpa))
2959                         return irk;
2960         }
2961
2962         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2963                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2964                         bacpy(&irk->rpa, rpa);
2965                         return irk;
2966                 }
2967         }
2968
2969         return NULL;
2970 }
2971
2972 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2973                                      u8 addr_type)
2974 {
2975         struct smp_irk *irk;
2976
2977         /* Identity Address must be public or static random */
2978         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2979                 return NULL;
2980
2981         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2982                 if (addr_type == irk->addr_type &&
2983                     bacmp(bdaddr, &irk->bdaddr) == 0)
2984                         return irk;
2985         }
2986
2987         return NULL;
2988 }
2989
2990 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2991                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2992 {
2993         struct link_key *key, *old_key;
2994         u8 old_key_type;
2995         bool persistent;
2996
2997         old_key = hci_find_link_key(hdev, bdaddr);
2998         if (old_key) {
2999                 old_key_type = old_key->type;
3000                 key = old_key;
3001         } else {
3002                 old_key_type = conn ? conn->key_type : 0xff;
3003                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3004                 if (!key)
3005                         return -ENOMEM;
3006                 list_add(&key->list, &hdev->link_keys);
3007         }
3008
3009         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3010
3011         /* Some buggy controller combinations generate a changed
3012          * combination key for legacy pairing even when there's no
3013          * previous key */
3014         if (type == HCI_LK_CHANGED_COMBINATION &&
3015             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3016                 type = HCI_LK_COMBINATION;
3017                 if (conn)
3018                         conn->key_type = type;
3019         }
3020
3021         bacpy(&key->bdaddr, bdaddr);
3022         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3023         key->pin_len = pin_len;
3024
3025         if (type == HCI_LK_CHANGED_COMBINATION)
3026                 key->type = old_key_type;
3027         else
3028                 key->type = type;
3029
3030         if (!new_key)
3031                 return 0;
3032
3033         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
3034
3035         mgmt_new_link_key(hdev, key, persistent);
3036
3037         if (conn)
3038                 conn->flush_key = !persistent;
3039
3040         return 0;
3041 }
3042
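/* Add or update a Long Term Key for the given identity address. The
 * master flag derived from the key type keeps master (STK/LTK) and
 * slave keys for the same address apart.
 */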
3043 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3044                             u8 addr_type, u8 type, u8 authenticated,
3045                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3046 {
3047         struct smp_ltk *key, *old_key;
3048         bool master = ltk_type_master(type);
3049
3050         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3051         if (old_key)
3052                 key = old_key;
3053         else {
3054                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3055                 if (!key)
3056                         return NULL;
3057                 list_add(&key->list, &hdev->long_term_keys);
3058         }
3059
3060         bacpy(&key->bdaddr, bdaddr);
3061         key->bdaddr_type = addr_type;
3062         memcpy(key->val, tk, sizeof(key->val));
3063         key->authenticated = authenticated;
3064         key->ediv = ediv;
3065         key->rand = rand;
3066         key->enc_size = enc_size;
3067         key->type = type;
3068
3069         return key;
3070 }
3071
3072 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3073                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3074 {
3075         struct smp_irk *irk;
3076
3077         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3078         if (!irk) {
3079                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3080                 if (!irk)
3081                         return NULL;
3082
3083                 bacpy(&irk->bdaddr, bdaddr);
3084                 irk->addr_type = addr_type;
3085
3086                 list_add(&irk->list, &hdev->identity_resolving_keys);
3087         }
3088
3089         memcpy(irk->val, val, 16);
3090         bacpy(&irk->rpa, rpa);
3091
3092         return irk;
3093 }
3094
3095 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3096 {
3097         struct link_key *key;
3098
3099         key = hci_find_link_key(hdev, bdaddr);
3100         if (!key)
3101                 return -ENOENT;
3102
3103         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3104
3105         list_del(&key->list);
3106         kfree(key);
3107
3108         return 0;
3109 }
3110
3111 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3112 {
3113         struct smp_ltk *k, *tmp;
3114         int removed = 0;
3115
3116         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3117                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3118                         continue;
3119
3120                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3121
3122                 list_del(&k->list);
3123                 kfree(k);
3124                 removed++;
3125         }
3126
3127         return removed ? 0 : -ENOENT;
3128 }
3129
3130 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3131 {
3132         struct smp_irk *k, *tmp;
3133
3134         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3135                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3136                         continue;
3137
3138                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3139
3140                 list_del(&k->list);
3141                 kfree(k);
3142         }
3143 }
3144
3145 /* HCI command timeout handler: log which command timed out and
 * re-enable command sending so that the queue can make progress.
 */
3146 static void hci_cmd_timeout(unsigned long arg)
3147 {
3148         struct hci_dev *hdev = (void *) arg;
3149
3150         if (hdev->sent_cmd) {
3151                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3152                 u16 opcode = __le16_to_cpu(sent->opcode);
3153
3154                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3155         } else {
3156                 BT_ERR("%s command tx timeout", hdev->name);
3157         }
3158
3159         atomic_set(&hdev->cmd_cnt, 1);
3160         queue_work(hdev->workqueue, &hdev->cmd_work);
3161 }
3162
3163 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3164                                           bdaddr_t *bdaddr)
3165 {
3166         struct oob_data *data;
3167
3168         list_for_each_entry(data, &hdev->remote_oob_data, list)
3169                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3170                         return data;
3171
3172         return NULL;
3173 }
3174
3175 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3176 {
3177         struct oob_data *data;
3178
3179         data = hci_find_remote_oob_data(hdev, bdaddr);
3180         if (!data)
3181                 return -ENOENT;
3182
3183         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3184
3185         list_del(&data->list);
3186         kfree(data);
3187
3188         return 0;
3189 }
3190
3191 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3192 {
3193         struct oob_data *data, *n;
3194
3195         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3196                 list_del(&data->list);
3197                 kfree(data);
3198         }
3199 }
3200
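/* Store or refresh the P-192 OOB hash and randomizer received for a
 * remote device, clearing any stale P-256 values.
 */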
3201 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3202                             u8 *hash, u8 *randomizer)
3203 {
3204         struct oob_data *data;
3205
3206         data = hci_find_remote_oob_data(hdev, bdaddr);
3207         if (!data) {
3208                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3209                 if (!data)
3210                         return -ENOMEM;
3211
3212                 bacpy(&data->bdaddr, bdaddr);
3213                 list_add(&data->list, &hdev->remote_oob_data);
3214         }
3215
3216         memcpy(data->hash192, hash, sizeof(data->hash192));
3217         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3218
3219         memset(data->hash256, 0, sizeof(data->hash256));
3220         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3221
3222         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3223
3224         return 0;
3225 }
3226
3227 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3228                                 u8 *hash192, u8 *randomizer192,
3229                                 u8 *hash256, u8 *randomizer256)
3230 {
3231         struct oob_data *data;
3232
3233         data = hci_find_remote_oob_data(hdev, bdaddr);
3234         if (!data) {
3235                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3236                 if (!data)
3237                         return -ENOMEM;
3238
3239                 bacpy(&data->bdaddr, bdaddr);
3240                 list_add(&data->list, &hdev->remote_oob_data);
3241         }
3242
3243         memcpy(data->hash192, hash192, sizeof(data->hash192));
3244         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3245
3246         memcpy(data->hash256, hash256, sizeof(data->hash256));
3247         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3248
3249         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3250
3251         return 0;
3252 }
3253
3254 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3255                                          bdaddr_t *bdaddr, u8 type)
3256 {
3257         struct bdaddr_list *b;
3258
3259         list_for_each_entry(b, &hdev->blacklist, list) {
3260                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3261                         return b;
3262         }
3263
3264         return NULL;
3265 }
3266
3267 static void hci_blacklist_clear(struct hci_dev *hdev)
3268 {
3269         struct list_head *p, *n;
3270
3271         list_for_each_safe(p, n, &hdev->blacklist) {
3272                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3273
3274                 list_del(p);
3275                 kfree(b);
3276         }
3277 }
3278
3279 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3280 {
3281         struct bdaddr_list *entry;
3282
3283         if (!bacmp(bdaddr, BDADDR_ANY))
3284                 return -EBADF;
3285
3286         if (hci_blacklist_lookup(hdev, bdaddr, type))
3287                 return -EEXIST;
3288
3289         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3290         if (!entry)
3291                 return -ENOMEM;
3292
3293         bacpy(&entry->bdaddr, bdaddr);
3294         entry->bdaddr_type = type;
3295
3296         list_add(&entry->list, &hdev->blacklist);
3297
3298         return mgmt_device_blocked(hdev, bdaddr, type);
3299 }
3300
3301 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3302 {
3303         struct bdaddr_list *entry;
3304
3305         if (!bacmp(bdaddr, BDADDR_ANY)) {
3306                 hci_blacklist_clear(hdev);
3307                 return 0;
3308         }
3309
3310         entry = hci_blacklist_lookup(hdev, bdaddr, type);
3311         if (!entry)
3312                 return -ENOENT;
3313
3314         list_del(&entry->list);
3315         kfree(entry);
3316
3317         return mgmt_device_unblocked(hdev, bdaddr, type);
3318 }
3319
3320 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3321                                           bdaddr_t *bdaddr, u8 type)
3322 {
3323         struct bdaddr_list *b;
3324
3325         list_for_each_entry(b, &hdev->le_white_list, list) {
3326                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3327                         return b;
3328         }
3329
3330         return NULL;
3331 }
3332
3333 void hci_white_list_clear(struct hci_dev *hdev)
3334 {
3335         struct list_head *p, *n;
3336
3337         list_for_each_safe(p, n, &hdev->le_white_list) {
3338                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3339
3340                 list_del(p);
3341                 kfree(b);
3342         }
3343 }
3344
3345 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3346 {
3347         struct bdaddr_list *entry;
3348
3349         if (!bacmp(bdaddr, BDADDR_ANY))
3350                 return -EBADF;
3351
3352         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3353         if (!entry)
3354                 return -ENOMEM;
3355
3356         bacpy(&entry->bdaddr, bdaddr);
3357         entry->bdaddr_type = type;
3358
3359         list_add(&entry->list, &hdev->le_white_list);
3360
3361         return 0;
3362 }
3363
3364 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3365 {
3366         struct bdaddr_list *entry;
3367
3368         if (!bacmp(bdaddr, BDADDR_ANY))
3369                 return -EBADF;
3370
3371         entry = hci_white_list_lookup(hdev, bdaddr, type);
3372         if (!entry)
3373                 return -ENOENT;
3374
3375         list_del(&entry->list);
3376         kfree(entry);
3377
3378         return 0;
3379 }
3380
3381 /* This function requires the caller holds hdev->lock */
3382 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3383                                                bdaddr_t *addr, u8 addr_type)
3384 {
3385         struct hci_conn_params *params;
3386
3387         list_for_each_entry(params, &hdev->le_conn_params, list) {
3388                 if (bacmp(&params->addr, addr) == 0 &&
3389                     params->addr_type == addr_type) {
3390                         return params;
3391                 }
3392         }
3393
3394         return NULL;
3395 }
3396
3397 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3398 {
3399         struct hci_conn *conn;
3400
3401         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3402         if (!conn)
3403                 return false;
3404
3405         if (conn->dst_type != type)
3406                 return false;
3407
3408         if (conn->state != BT_CONNECTED)
3409                 return false;
3410
3411         return true;
3412 }
3413
3414 static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3415 {
3416         if (addr_type == ADDR_LE_DEV_PUBLIC)
3417                 return true;
3418
3419         /* Check for Random Static address type */
3420         if ((addr->b[5] & 0xc0) == 0xc0)
3421                 return true;
3422
3423         return false;
3424 }
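
/* Illustrative note: a static random (identity) address has the two
 * most significant bits of its most significant byte set, i.e. b[5]
 * lies in the 0xc0-0xff range. For example, c4:11:22:33:44:55 passes
 * the check above, while 44:11:22:33:44:55 does not.
 */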
3425
3426 /* This function requires the caller holds hdev->lock */
3427 int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3428                         u8 auto_connect, u16 conn_min_interval,
3429                         u16 conn_max_interval)
3430 {
3431         struct hci_conn_params *params;
3432
3433         if (!is_identity_address(addr, addr_type))
3434                 return -EINVAL;
3435
3436         params = hci_conn_params_lookup(hdev, addr, addr_type);
3437         if (params)
3438                 goto update;
3439
3440         params = kzalloc(sizeof(*params), GFP_KERNEL);
3441         if (!params) {
3442                 BT_ERR("Out of memory");
3443                 return -ENOMEM;
3444         }
3445
3446         bacpy(&params->addr, addr);
3447         params->addr_type = addr_type;
3448
3449         list_add(&params->list, &hdev->le_conn_params);
3450
3451 update:
3452         params->conn_min_interval = conn_min_interval;
3453         params->conn_max_interval = conn_max_interval;
3454         params->auto_connect = auto_connect;
3455
3456         switch (auto_connect) {
3457         case HCI_AUTO_CONN_DISABLED:
3458         case HCI_AUTO_CONN_LINK_LOSS:
3459                 hci_pend_le_conn_del(hdev, addr, addr_type);
3460                 break;
3461         case HCI_AUTO_CONN_ALWAYS:
3462                 if (!is_connected(hdev, addr, addr_type))
3463                         hci_pend_le_conn_add(hdev, addr, addr_type);
3464                 break;
3465         }
3466
3467         BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3468                "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3469                conn_min_interval, conn_max_interval);
3470
3471         return 0;
3472 }
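
/* Usage sketch (illustrative only; the interval values are just the
 * defaults set up in hci_alloc_dev()): enable automatic reconnection
 * to a device with a public identity address:
 *
 *      hci_dev_lock(hdev);
 *      err = hci_conn_params_add(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC,
 *                                HCI_AUTO_CONN_ALWAYS, 0x0028, 0x0038);
 *      hci_dev_unlock(hdev);
 */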
3473
3474 /* This function requires the caller holds hdev->lock */
3475 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3476 {
3477         struct hci_conn_params *params;
3478
3479         params = hci_conn_params_lookup(hdev, addr, addr_type);
3480         if (!params)
3481                 return;
3482
3483         hci_pend_le_conn_del(hdev, addr, addr_type);
3484
3485         list_del(&params->list);
3486         kfree(params);
3487
3488         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3489 }
3490
3491 /* This function requires the caller holds hdev->lock */
3492 void hci_conn_params_clear(struct hci_dev *hdev)
3493 {
3494         struct hci_conn_params *params, *tmp;
3495
3496         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3497                 list_del(&params->list);
3498                 kfree(params);
3499         }
3500
3501         BT_DBG("All LE connection parameters were removed");
3502 }
3503
3504 /* This function requires the caller holds hdev->lock */
3505 struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3506                                             bdaddr_t *addr, u8 addr_type)
3507 {
3508         struct bdaddr_list *entry;
3509
3510         list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3511                 if (bacmp(&entry->bdaddr, addr) == 0 &&
3512                     entry->bdaddr_type == addr_type)
3513                         return entry;
3514         }
3515
3516         return NULL;
3517 }
3518
3519 /* This function requires the caller holds hdev->lock */
3520 void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3521 {
3522         struct bdaddr_list *entry;
3523
3524         entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3525         if (entry)
3526                 goto done;
3527
3528         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3529         if (!entry) {
3530                 BT_ERR("Out of memory");
3531                 return;
3532         }
3533
3534         bacpy(&entry->bdaddr, addr);
3535         entry->bdaddr_type = addr_type;
3536
3537         list_add(&entry->list, &hdev->pend_le_conns);
3538
3539         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3540
3541 done:
3542         hci_update_background_scan(hdev);
3543 }
3544
3545 /* This function requires the caller holds hdev->lock */
3546 void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3547 {
3548         struct bdaddr_list *entry;
3549
3550         entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3551         if (!entry)
3552                 goto done;
3553
3554         list_del(&entry->list);
3555         kfree(entry);
3556
3557         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3558
3559 done:
3560         hci_update_background_scan(hdev);
3561 }
3562
3563 /* This function requires the caller holds hdev->lock */
3564 void hci_pend_le_conns_clear(struct hci_dev *hdev)
3565 {
3566         struct bdaddr_list *entry, *tmp;
3567
3568         list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3569                 list_del(&entry->list);
3570                 kfree(entry);
3571         }
3572
3573         BT_DBG("All LE pending connections cleared");
3574 }
3575
3576 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3577 {
3578         if (status) {
3579                 BT_ERR("Failed to start inquiry: status %d", status);
3580
3581                 hci_dev_lock(hdev);
3582                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3583                 hci_dev_unlock(hdev);
3584                 return;
3585         }
3586 }
3587
3588 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3589 {
3590         /* General inquiry access code (GIAC) */
3591         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3592         struct hci_request req;
3593         struct hci_cp_inquiry cp;
3594         int err;
3595
3596         if (status) {
3597                 BT_ERR("Failed to disable LE scanning: status %d", status);
3598                 return;
3599         }
3600
3601         switch (hdev->discovery.type) {
3602         case DISCOV_TYPE_LE:
3603                 hci_dev_lock(hdev);
3604                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3605                 hci_dev_unlock(hdev);
3606                 break;
3607
3608         case DISCOV_TYPE_INTERLEAVED:
3609                 hci_req_init(&req, hdev);
3610
3611                 memset(&cp, 0, sizeof(cp));
3612                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3613                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3614                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3615
3616                 hci_dev_lock(hdev);
3617
3618                 hci_inquiry_cache_flush(hdev);
3619
3620                 err = hci_req_run(&req, inquiry_complete);
3621                 if (err) {
3622                         BT_ERR("Inquiry request failed: err %d", err);
3623                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3624                 }
3625
3626                 hci_dev_unlock(hdev);
3627                 break;
3628         }
3629 }
3630
3631 static void le_scan_disable_work(struct work_struct *work)
3632 {
3633         struct hci_dev *hdev = container_of(work, struct hci_dev,
3634                                             le_scan_disable.work);
3635         struct hci_request req;
3636         int err;
3637
3638         BT_DBG("%s", hdev->name);
3639
3640         hci_req_init(&req, hdev);
3641
3642         hci_req_add_le_scan_disable(&req);
3643
3644         err = hci_req_run(&req, le_scan_disable_work_complete);
3645         if (err)
3646                 BT_ERR("Disable LE scanning request failed: err %d", err);
3647 }
3648
3649 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3650 {
3651         struct hci_dev *hdev = req->hdev;
3652
3653         /* If we're advertising or initiating an LE connection, we can't
3654          * go ahead and change the random address at this time. This is
3655          * because the eventual initiator address used for the
3656          * subsequently created connection will be undefined (some
3657          * controllers use the new address and others the one we had
3658          * when the operation started).
3659          *
3660          * In this kind of scenario skip the update and let the random
3661          * address be updated at the next cycle.
3662          */
3663         if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3664             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3665                 BT_DBG("Deferring random address update");
3666                 return;
3667         }
3668
3669         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3670 }
3671
3672 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3673                               u8 *own_addr_type)
3674 {
3675         struct hci_dev *hdev = req->hdev;
3676         int err;
3677
3678         /* If privacy is enabled, use a resolvable private address. If
3679          * the current RPA has expired or an address other than the
3680          * current RPA is in use, generate a new one.
3681          */
3682         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3683                 int to;
3684
3685                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3686
3687                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3688                     !bacmp(&hdev->random_addr, &hdev->rpa))
3689                         return 0;
3690
3691                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3692                 if (err < 0) {
3693                         BT_ERR("%s failed to generate new RPA", hdev->name);
3694                         return err;
3695                 }
3696
3697                 set_random_addr(req, &hdev->rpa);
3698
3699                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3700                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3701
3702                 return 0;
3703         }
3704
3705         /* If privacy is required but no resolvable private address is
3706          * available, use an unresolvable private address instead. This
3707          * is useful for active scanning and non-connectable advertising.
3708          */
3709         if (require_privacy) {
3710                 bdaddr_t urpa;
3711
3712                 get_random_bytes(&urpa, 6);
3713                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3714
3715                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3716                 set_random_addr(req, &urpa);
3717                 return 0;
3718         }
3719
3720         /* If forcing static address is in use or there is no public
3721          * address, use the static address as the random address (but
3722          * skip the HCI command if the current random address is
3723          * already the static one).
3724          */
3725         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3726             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3727                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3728                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3729                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3730                                     &hdev->static_addr);
3731                 return 0;
3732         }
3733
3734         /* Neither privacy nor static address is being used so use a
3735          * public address.
3736          */
3737         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3738
3739         return 0;
3740 }
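
/* Usage sketch (illustrative only): request builders call this before
 * queueing an address-dependent command, so that own_addr_type matches
 * whatever HCI_OP_LE_SET_RANDOM_ADDR command may have been queued:
 *
 *      u8 own_addr_type;
 *
 *      err = hci_update_random_address(req, false, &own_addr_type);
 *      if (err < 0)
 *              return err;
 *      ... put own_addr_type into the subsequent command parameters ...
 */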
3741
3742 /* Copy the Identity Address of the controller.
3743  *
3744  * If the controller has a public BD_ADDR, then by default use that one.
3745  * If this is a LE only controller without a public address, default to
3746  * the static random address.
3747  *
3748  * For debugging purposes it is possible to force controllers with a
3749  * public address to use the static random address instead.
3750  */
3751 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3752                                u8 *bdaddr_type)
3753 {
3754         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3755             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3756                 bacpy(bdaddr, &hdev->static_addr);
3757                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3758         } else {
3759                 bacpy(bdaddr, &hdev->bdaddr);
3760                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3761         }
3762 }
3763
3764 /* Alloc HCI device */
3765 struct hci_dev *hci_alloc_dev(void)
3766 {
3767         struct hci_dev *hdev;
3768
3769         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3770         if (!hdev)
3771                 return NULL;
3772
3773         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3774         hdev->esco_type = (ESCO_HV1);
3775         hdev->link_mode = (HCI_LM_ACCEPT);
3776         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3777         hdev->io_capability = 0x03;     /* No Input No Output */
3778         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3779         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3780
3781         hdev->sniff_max_interval = 800;
3782         hdev->sniff_min_interval = 80;
3783
3784         hdev->le_adv_channel_map = 0x07;
3785         hdev->le_scan_interval = 0x0060;
3786         hdev->le_scan_window = 0x0030;
3787         hdev->le_conn_min_interval = 0x0028;
3788         hdev->le_conn_max_interval = 0x0038;
3789
3790         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3791         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3792
3793         mutex_init(&hdev->lock);
3794         mutex_init(&hdev->req_lock);
3795
3796         INIT_LIST_HEAD(&hdev->mgmt_pending);
3797         INIT_LIST_HEAD(&hdev->blacklist);
3798         INIT_LIST_HEAD(&hdev->uuids);
3799         INIT_LIST_HEAD(&hdev->link_keys);
3800         INIT_LIST_HEAD(&hdev->long_term_keys);
3801         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3802         INIT_LIST_HEAD(&hdev->remote_oob_data);
3803         INIT_LIST_HEAD(&hdev->le_white_list);
3804         INIT_LIST_HEAD(&hdev->le_conn_params);
3805         INIT_LIST_HEAD(&hdev->pend_le_conns);
3806         INIT_LIST_HEAD(&hdev->conn_hash.list);
3807
3808         INIT_WORK(&hdev->rx_work, hci_rx_work);
3809         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3810         INIT_WORK(&hdev->tx_work, hci_tx_work);
3811         INIT_WORK(&hdev->power_on, hci_power_on);
3812
3813         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3814         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3815         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3816
3817         skb_queue_head_init(&hdev->rx_q);
3818         skb_queue_head_init(&hdev->cmd_q);
3819         skb_queue_head_init(&hdev->raw_q);
3820
3821         init_waitqueue_head(&hdev->req_wait_q);
3822
3823         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
3824
3825         hci_init_sysfs(hdev);
3826         discovery_init(hdev);
3827
3828         return hdev;
3829 }
3830 EXPORT_SYMBOL(hci_alloc_dev);
3831
3832 /* Free HCI device */
3833 void hci_free_dev(struct hci_dev *hdev)
3834 {
3835         /* will be freed via device release */
3836         put_device(&hdev->dev);
3837 }
3838 EXPORT_SYMBOL(hci_free_dev);
3839
3840 /* Register HCI device */
3841 int hci_register_dev(struct hci_dev *hdev)
3842 {
3843         int id, error;
3844
3845         if (!hdev->open || !hdev->close)
3846                 return -EINVAL;
3847
3848         /* Do not allow HCI_AMP devices to register at index 0,
3849          * so the index can be used as the AMP controller ID.
3850          */
3851         switch (hdev->dev_type) {
3852         case HCI_BREDR:
3853                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3854                 break;
3855         case HCI_AMP:
3856                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3857                 break;
3858         default:
3859                 return -EINVAL;
3860         }
3861
3862         if (id < 0)
3863                 return id;
3864
3865         sprintf(hdev->name, "hci%d", id);
3866         hdev->id = id;
3867
3868         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3869
3870         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3871                                           WQ_MEM_RECLAIM, 1, hdev->name);
3872         if (!hdev->workqueue) {
3873                 error = -ENOMEM;
3874                 goto err;
3875         }
3876
3877         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3878                                               WQ_MEM_RECLAIM, 1, hdev->name);
3879         if (!hdev->req_workqueue) {
3880                 destroy_workqueue(hdev->workqueue);
3881                 error = -ENOMEM;
3882                 goto err;
3883         }
3884
3885         if (!IS_ERR_OR_NULL(bt_debugfs))
3886                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3887
3888         dev_set_name(&hdev->dev, "%s", hdev->name);
3889
3890         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3891                                                CRYPTO_ALG_ASYNC);
3892         if (IS_ERR(hdev->tfm_aes)) {
3893                 BT_ERR("Unable to create crypto context");
3894                 error = PTR_ERR(hdev->tfm_aes);
3895                 hdev->tfm_aes = NULL;
3896                 goto err_wqueue;
3897         }
3898
3899         error = device_add(&hdev->dev);
3900         if (error < 0)
3901                 goto err_tfm;
3902
3903         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3904                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3905                                     hdev);
3906         if (hdev->rfkill) {
3907                 if (rfkill_register(hdev->rfkill) < 0) {
3908                         rfkill_destroy(hdev->rfkill);
3909                         hdev->rfkill = NULL;
3910                 }
3911         }
3912
3913         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3914                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3915
3916         set_bit(HCI_SETUP, &hdev->dev_flags);
3917         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3918
3919         if (hdev->dev_type == HCI_BREDR) {
3920                 /* Assume BR/EDR support until proven otherwise (such as
3921                  * through reading supported features during init).
3922                  */
3923                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3924         }
3925
3926         write_lock(&hci_dev_list_lock);
3927         list_add(&hdev->list, &hci_dev_list);
3928         write_unlock(&hci_dev_list_lock);
3929
3930         hci_notify(hdev, HCI_DEV_REG);
3931         hci_dev_hold(hdev);
3932
3933         queue_work(hdev->req_workqueue, &hdev->power_on);
3934
3935         return id;
3936
3937 err_tfm:
3938         crypto_free_blkcipher(hdev->tfm_aes);
3939 err_wqueue:
3940         destroy_workqueue(hdev->workqueue);
3941         destroy_workqueue(hdev->req_workqueue);
3942 err:
3943         ida_simple_remove(&hci_index_ida, hdev->id);
3944
3945         return error;
3946 }
3947 EXPORT_SYMBOL(hci_register_dev);
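
/* Driver-side sketch (illustrative only; my_open, my_close and my_send
 * are hypothetical transport callbacks):
 *
 *      struct hci_dev *hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *      hdev->bus = HCI_USB;
 *      hdev->open = my_open;
 *      hdev->close = my_close;
 *      hdev->send = my_send;
 *      err = hci_register_dev(hdev);
 *      if (err < 0)
 *              hci_free_dev(hdev);
 */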
3948
3949 /* Unregister HCI device */
3950 void hci_unregister_dev(struct hci_dev *hdev)
3951 {
3952         int i, id;
3953
3954         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3955
3956         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3957
3958         id = hdev->id;
3959
3960         write_lock(&hci_dev_list_lock);
3961         list_del(&hdev->list);
3962         write_unlock(&hci_dev_list_lock);
3963
3964         hci_dev_do_close(hdev);
3965
3966         for (i = 0; i < NUM_REASSEMBLY; i++)
3967                 kfree_skb(hdev->reassembly[i]);
3968
3969         cancel_work_sync(&hdev->power_on);
3970
3971         if (!test_bit(HCI_INIT, &hdev->flags) &&
3972             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
3973                 hci_dev_lock(hdev);
3974                 mgmt_index_removed(hdev);
3975                 hci_dev_unlock(hdev);
3976         }
3977
3978         /* mgmt_index_removed should take care of emptying the
3979          * pending list */
3980         BUG_ON(!list_empty(&hdev->mgmt_pending));
3981
3982         hci_notify(hdev, HCI_DEV_UNREG);
3983
3984         if (hdev->rfkill) {
3985                 rfkill_unregister(hdev->rfkill);
3986                 rfkill_destroy(hdev->rfkill);
3987         }
3988
3989         if (hdev->tfm_aes)
3990                 crypto_free_blkcipher(hdev->tfm_aes);
3991
3992         device_del(&hdev->dev);
3993
3994         debugfs_remove_recursive(hdev->debugfs);
3995
3996         destroy_workqueue(hdev->workqueue);
3997         destroy_workqueue(hdev->req_workqueue);
3998
3999         hci_dev_lock(hdev);
4000         hci_blacklist_clear(hdev);
4001         hci_uuids_clear(hdev);
4002         hci_link_keys_clear(hdev);
4003         hci_smp_ltks_clear(hdev);
4004         hci_smp_irks_clear(hdev);
4005         hci_remote_oob_data_clear(hdev);
4006         hci_white_list_clear(hdev);
4007         hci_conn_params_clear(hdev);
4008         hci_pend_le_conns_clear(hdev);
4009         hci_dev_unlock(hdev);
4010
4011         hci_dev_put(hdev);
4012
4013         ida_simple_remove(&hci_index_ida, id);
4014 }
4015 EXPORT_SYMBOL(hci_unregister_dev);
4016
4017 /* Suspend HCI device */
4018 int hci_suspend_dev(struct hci_dev *hdev)
4019 {
4020         hci_notify(hdev, HCI_DEV_SUSPEND);
4021         return 0;
4022 }
4023 EXPORT_SYMBOL(hci_suspend_dev);
4024
4025 /* Resume HCI device */
4026 int hci_resume_dev(struct hci_dev *hdev)
4027 {
4028         hci_notify(hdev, HCI_DEV_RESUME);
4029         return 0;
4030 }
4031 EXPORT_SYMBOL(hci_resume_dev);
4032
4033 /* Receive frame from HCI drivers */
4034 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4035 {
4036         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4037                       !test_bit(HCI_INIT, &hdev->flags))) {
4038                 kfree_skb(skb);
4039                 return -ENXIO;
4040         }
4041
4042         /* Incoming skb */
4043         bt_cb(skb)->incoming = 1;
4044
4045         /* Time stamp */
4046         __net_timestamp(skb);
4047
4048         skb_queue_tail(&hdev->rx_q, skb);
4049         queue_work(hdev->workqueue, &hdev->rx_work);
4050
4051         return 0;
4052 }
4053 EXPORT_SYMBOL(hci_recv_frame);
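
/* Driver-side sketch (illustrative only): a transport driver delivers
 * a complete packet by tagging its type and handing the skb over; the
 * core consumes the skb on both the success and error paths:
 *
 *      skb = bt_skb_alloc(len, GFP_ATOMIC);
 *      if (!skb)
 *              return -ENOMEM;
 *      memcpy(skb_put(skb, len), buf, len);
 *      bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *      return hci_recv_frame(hdev, skb);
 */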
4054
4055 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4056                           int count, __u8 index)
4057 {
4058         int len = 0;
4059         int hlen = 0;
4060         int remain = count;
4061         struct sk_buff *skb;
4062         struct bt_skb_cb *scb;
4063
4064         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4065             index >= NUM_REASSEMBLY)
4066                 return -EILSEQ;
4067
4068         skb = hdev->reassembly[index];
4069
4070         if (!skb) {
4071                 switch (type) {
4072                 case HCI_ACLDATA_PKT:
4073                         len = HCI_MAX_FRAME_SIZE;
4074                         hlen = HCI_ACL_HDR_SIZE;
4075                         break;
4076                 case HCI_EVENT_PKT:
4077                         len = HCI_MAX_EVENT_SIZE;
4078                         hlen = HCI_EVENT_HDR_SIZE;
4079                         break;
4080                 case HCI_SCODATA_PKT:
4081                         len = HCI_MAX_SCO_SIZE;
4082                         hlen = HCI_SCO_HDR_SIZE;
4083                         break;
4084                 }
4085
4086                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4087                 if (!skb)
4088                         return -ENOMEM;
4089
4090                 scb = (void *) skb->cb;
4091                 scb->expect = hlen;
4092                 scb->pkt_type = type;
4093
4094                 hdev->reassembly[index] = skb;
4095         }
4096
4097         while (count) {
4098                 scb = (void *) skb->cb;
4099                 len = min_t(uint, scb->expect, count);
4100
4101                 memcpy(skb_put(skb, len), data, len);
4102
4103                 count -= len;
4104                 data += len;
4105                 scb->expect -= len;
4106                 remain = count;
4107
4108                 switch (type) {
4109                 case HCI_EVENT_PKT:
4110                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4111                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4112                                 scb->expect = h->plen;
4113
4114                                 if (skb_tailroom(skb) < scb->expect) {
4115                                         kfree_skb(skb);
4116                                         hdev->reassembly[index] = NULL;
4117                                         return -ENOMEM;
4118                                 }
4119                         }
4120                         break;
4121
4122                 case HCI_ACLDATA_PKT:
4123                         if (skb->len == HCI_ACL_HDR_SIZE) {
4124                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4125                                 scb->expect = __le16_to_cpu(h->dlen);
4126
4127                                 if (skb_tailroom(skb) < scb->expect) {
4128                                         kfree_skb(skb);
4129                                         hdev->reassembly[index] = NULL;
4130                                         return -ENOMEM;
4131                                 }
4132                         }
4133                         break;
4134
4135                 case HCI_SCODATA_PKT:
4136                         if (skb->len == HCI_SCO_HDR_SIZE) {
4137                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4138                                 scb->expect = h->dlen;
4139
4140                                 if (skb_tailroom(skb) < scb->expect) {
4141                                         kfree_skb(skb);
4142                                         hdev->reassembly[index] = NULL;
4143                                         return -ENOMEM;
4144                                 }
4145                         }
4146                         break;
4147                 }
4148
4149                 if (scb->expect == 0) {
4150                         /* Complete frame */
4151
4152                         bt_cb(skb)->pkt_type = type;
4153                         hci_recv_frame(hdev, skb);
4154
4155                         hdev->reassembly[index] = NULL;
4156                         return remain;
4157                 }
4158         }
4159
4160         return remain;
4161 }
4162
4163 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4164 {
4165         int rem = 0;
4166
4167         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4168                 return -EILSEQ;
4169
4170         while (count) {
4171                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4172                 if (rem < 0)
4173                         return rem;
4174
4175                 data += (count - rem);
4176                 count = rem;
4177         }
4178
4179         return rem;
4180 }
4181 EXPORT_SYMBOL(hci_recv_fragment);
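
/* Note: hci_reassembly() returns the number of input bytes left over
 * after a completed frame, or zero once all input has been consumed
 * into a (possibly still incomplete) frame, so the loop above simply
 * re-feeds the remainder; a negative return aborts with an error.
 */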
4182
4183 #define STREAM_REASSEMBLY 0
4184
4185 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4186 {
4187         int type;
4188         int rem = 0;
4189
4190         while (count) {
4191                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4192
4193                 if (!skb) {
4194                         struct { char type; } *pkt;
4195
4196                         /* Start of the frame */
4197                         pkt = data;
4198                         type = pkt->type;
4199
4200                         data++;
4201                         count--;
4202                 } else
4203                         type = bt_cb(skb)->pkt_type;
4204
4205                 rem = hci_reassembly(hdev, type, data, count,
4206                                      STREAM_REASSEMBLY);
4207                 if (rem < 0)
4208                         return rem;
4209
4210                 data += (count - rem);
4211                 count = rem;
4212         }
4213
4214         return rem;
4215 }
4216 EXPORT_SYMBOL(hci_recv_stream_fragment);
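
/* Driver-side sketch (illustrative only): UART-style transports that
 * receive a raw byte stream, where each packet is prefixed with a
 * one-byte packet type indicator, can feed the data in as it arrives:
 *
 *      if (hci_recv_stream_fragment(hdev, buf, len) < 0)
 *              ... transport error handling ...
 */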
4217
4218 /* ---- Interface to upper protocols ---- */
4219
4220 int hci_register_cb(struct hci_cb *cb)
4221 {
4222         BT_DBG("%p name %s", cb, cb->name);
4223
4224         write_lock(&hci_cb_list_lock);
4225         list_add(&cb->list, &hci_cb_list);
4226         write_unlock(&hci_cb_list_lock);
4227
4228         return 0;
4229 }
4230 EXPORT_SYMBOL(hci_register_cb);
4231
4232 int hci_unregister_cb(struct hci_cb *cb)
4233 {
4234         BT_DBG("%p name %s", cb, cb->name);
4235
4236         write_lock(&hci_cb_list_lock);
4237         list_del(&cb->list);
4238         write_unlock(&hci_cb_list_lock);
4239
4240         return 0;
4241 }
4242 EXPORT_SYMBOL(hci_unregister_cb);
4243
4244 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4245 {
4246         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4247
4248         /* Time stamp */
4249         __net_timestamp(skb);
4250
4251         /* Send copy to monitor */
4252         hci_send_to_monitor(hdev, skb);
4253
4254         if (atomic_read(&hdev->promisc)) {
4255                 /* Send copy to the sockets */
4256                 hci_send_to_sock(hdev, skb);
4257         }
4258
4259         /* Get rid of skb owner, prior to sending to the driver. */
4260         skb_orphan(skb);
4261
4262         if (hdev->send(hdev, skb) < 0)
4263                 BT_ERR("%s sending frame failed", hdev->name);
4264 }
4265
4266 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4267 {
4268         skb_queue_head_init(&req->cmd_q);
4269         req->hdev = hdev;
4270         req->err = 0;
4271 }
4272
4273 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4274 {
4275         struct hci_dev *hdev = req->hdev;
4276         struct sk_buff *skb;
4277         unsigned long flags;
4278
4279         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4280
4281         /* If an error occurred during request building, remove all HCI
4282          * commands queued on the HCI request queue.
4283          */
4284         if (req->err) {
4285                 skb_queue_purge(&req->cmd_q);
4286                 return req->err;
4287         }
4288
4289         /* Do not allow empty requests */
4290         if (skb_queue_empty(&req->cmd_q))
4291                 return -ENODATA;
4292
4293         skb = skb_peek_tail(&req->cmd_q);
4294         bt_cb(skb)->req.complete = complete;
4295
4296         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4297         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4298         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4299
4300         queue_work(hdev->workqueue, &hdev->cmd_work);
4301
4302         return 0;
4303 }
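
/* Usage sketch (illustrative only; my_complete is a hypothetical
 * hci_req_complete_t callback): batch several commands and run them as
 * one request, exactly as le_scan_disable_work() does above:
 *
 *      struct hci_request req;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
 *      err = hci_req_run(&req, my_complete);
 */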
4304
4305 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4306                                        u32 plen, const void *param)
4307 {
4308         int len = HCI_COMMAND_HDR_SIZE + plen;
4309         struct hci_command_hdr *hdr;
4310         struct sk_buff *skb;
4311
4312         skb = bt_skb_alloc(len, GFP_ATOMIC);
4313         if (!skb)
4314                 return NULL;
4315
4316         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4317         hdr->opcode = cpu_to_le16(opcode);
4318         hdr->plen   = plen;
4319
4320         if (plen)
4321                 memcpy(skb_put(skb, plen), param, plen);
4322
4323         BT_DBG("skb len %d", skb->len);
4324
4325         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4326
4327         return skb;
4328 }
4329
4330 /* Send HCI command */
4331 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4332                  const void *param)
4333 {
4334         struct sk_buff *skb;
4335
4336         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4337
4338         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4339         if (!skb) {
4340                 BT_ERR("%s no memory for command", hdev->name);
4341                 return -ENOMEM;
4342         }
4343
4344         /* Stand-alone HCI commands must be flagged as
4345          * single-command requests.
4346          */
4347         bt_cb(skb)->req.start = true;
4348
4349         skb_queue_tail(&hdev->cmd_q, skb);
4350         queue_work(hdev->workqueue, &hdev->cmd_work);
4351
4352         return 0;
4353 }
4354
4355 /* Queue a command to an asynchronous HCI request */
4356 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4357                     const void *param, u8 event)
4358 {
4359         struct hci_dev *hdev = req->hdev;
4360         struct sk_buff *skb;
4361
4362         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4363
4364         /* If an error occurred during request building, there is no point in
4365          * queueing the HCI command. We can simply return.
4366          */
4367         if (req->err)
4368                 return;
4369
4370         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4371         if (!skb) {
4372                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4373                        hdev->name, opcode);
4374                 req->err = -ENOMEM;
4375                 return;
4376         }
4377
4378         if (skb_queue_empty(&req->cmd_q))
4379                 bt_cb(skb)->req.start = true;
4380
4381         bt_cb(skb)->req.event = event;
4382
4383         skb_queue_tail(&req->cmd_q, skb);
4384 }
4385
4386 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4387                  const void *param)
4388 {
4389         hci_req_add_ev(req, opcode, plen, param, 0);
4390 }
4391
4392 /* Get data from the previously sent command */
4393 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4394 {
4395         struct hci_command_hdr *hdr;
4396
4397         if (!hdev->sent_cmd)
4398                 return NULL;
4399
4400         hdr = (void *) hdev->sent_cmd->data;
4401
4402         if (hdr->opcode != cpu_to_le16(opcode))
4403                 return NULL;
4404
4405         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4406
4407         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4408 }
4409
4410 /* Send ACL data */
4411 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4412 {
4413         struct hci_acl_hdr *hdr;
4414         int len = skb->len;
4415
4416         skb_push(skb, HCI_ACL_HDR_SIZE);
4417         skb_reset_transport_header(skb);
4418         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4419         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4420         hdr->dlen   = cpu_to_le16(len);
4421 }
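
/* The packed handle field keeps the 12-bit connection handle in the
 * low bits and the packet boundary/broadcast flags in the upper four
 * bits: hci_handle_pack(handle, flags) expands to
 * (handle & 0x0fff) | (flags << 12). For example, handle 0x0001 sent
 * with ACL_START (0x02) packs to 0x2001.
 */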
4422
4423 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4424                           struct sk_buff *skb, __u16 flags)
4425 {
4426         struct hci_conn *conn = chan->conn;
4427         struct hci_dev *hdev = conn->hdev;
4428         struct sk_buff *list;
4429
4430         skb->len = skb_headlen(skb);
4431         skb->data_len = 0;
4432
4433         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4434
4435         switch (hdev->dev_type) {
4436         case HCI_BREDR:
4437                 hci_add_acl_hdr(skb, conn->handle, flags);
4438                 break;
4439         case HCI_AMP:
4440                 hci_add_acl_hdr(skb, chan->handle, flags);
4441                 break;
4442         default:
4443                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4444                 return;
4445         }
4446
4447         list = skb_shinfo(skb)->frag_list;
4448         if (!list) {
4449                 /* Non-fragmented */
4450                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4451
4452                 skb_queue_tail(queue, skb);
4453         } else {
4454                 /* Fragmented */
4455                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4456
4457                 skb_shinfo(skb)->frag_list = NULL;
4458
4459                 /* Queue all fragments atomically */
4460                 spin_lock(&queue->lock);
4461
4462                 __skb_queue_tail(queue, skb);
4463
4464                 flags &= ~ACL_START;
4465                 flags |= ACL_CONT;
4466                 do {
4467                         skb = list; list = list->next;
4468
4469                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4470                         hci_add_acl_hdr(skb, conn->handle, flags);
4471
4472                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4473
4474                         __skb_queue_tail(queue, skb);
4475                 } while (list);
4476
4477                 spin_unlock(&queue->lock);
4478         }
4479 }
4480
4481 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4482 {
4483         struct hci_dev *hdev = chan->conn->hdev;
4484
4485         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4486
4487         hci_queue_acl(chan, &chan->data_q, skb, flags);
4488
4489         queue_work(hdev->workqueue, &hdev->tx_work);
4490 }
4491
4492 /* Send SCO data */
4493 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4494 {
4495         struct hci_dev *hdev = conn->hdev;
4496         struct hci_sco_hdr hdr;
4497
4498         BT_DBG("%s len %d", hdev->name, skb->len);
4499
4500         hdr.handle = cpu_to_le16(conn->handle);
4501         hdr.dlen   = skb->len;
4502
4503         skb_push(skb, HCI_SCO_HDR_SIZE);
4504         skb_reset_transport_header(skb);
4505         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4506
4507         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4508
4509         skb_queue_tail(&conn->data_q, skb);
4510         queue_work(hdev->workqueue, &hdev->tx_work);
4511 }
4512
4513 /* ---- HCI TX task (outgoing data) ---- */
4514
4515 /* HCI Connection scheduler */
4516 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4517                                      int *quote)
4518 {
4519         struct hci_conn_hash *h = &hdev->conn_hash;
4520         struct hci_conn *conn = NULL, *c;
4521         unsigned int num = 0, min = ~0;
4522
4523         /* We don't have to lock device here. Connections are always
4524          * added and removed with TX task disabled. */
4525
4526         rcu_read_lock();
4527
4528         list_for_each_entry_rcu(c, &h->list, list) {
4529                 if (c->type != type || skb_queue_empty(&c->data_q))
4530                         continue;
4531
4532                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4533                         continue;
4534
4535                 num++;
4536
4537                 if (c->sent < min) {
4538                         min  = c->sent;
4539                         conn = c;
4540                 }
4541
4542                 if (hci_conn_num(hdev, type) == num)
4543                         break;
4544         }
4545
4546         rcu_read_unlock();
4547
4548         if (conn) {
4549                 int cnt, q;
4550
4551                 switch (conn->type) {
4552                 case ACL_LINK:
4553                         cnt = hdev->acl_cnt;
4554                         break;
4555                 case SCO_LINK:
4556                 case ESCO_LINK:
4557                         cnt = hdev->sco_cnt;
4558                         break;
4559                 case LE_LINK:
4560                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4561                         break;
4562                 default:
4563                         cnt = 0;
4564                         BT_ERR("Unknown link type");
4565                 }
4566
4567                 q = cnt / num;
4568                 *quote = q ? q : 1;
4569         } else
4570                 *quote = 0;
4571
4572         BT_DBG("conn %p quote %d", conn, *quote);
4573         return conn;
4574 }
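
/* Scheduling example (illustrative): with two ACL connections that
 * both have queued data and acl_cnt = 8 free controller slots, the
 * connection with the fewest packets in flight is picked and granted a
 * quote of 8 / 2 = 4 packets, sharing the bandwidth fairly.
 */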
4575
4576 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4577 {
4578         struct hci_conn_hash *h = &hdev->conn_hash;
4579         struct hci_conn *c;
4580
4581         BT_ERR("%s link tx timeout", hdev->name);
4582
4583         rcu_read_lock();
4584
4585         /* Kill stalled connections */
4586         list_for_each_entry_rcu(c, &h->list, list) {
4587                 if (c->type == type && c->sent) {
4588                         BT_ERR("%s killing stalled connection %pMR",
4589                                hdev->name, &c->dst);
4590                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4591                 }
4592         }
4593
4594         rcu_read_unlock();
4595 }
4596
4597 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4598                                       int *quote)
4599 {
4600         struct hci_conn_hash *h = &hdev->conn_hash;
4601         struct hci_chan *chan = NULL;
4602         unsigned int num = 0, min = ~0, cur_prio = 0;
4603         struct hci_conn *conn;
4604         int cnt, q, conn_num = 0;
4605
4606         BT_DBG("%s", hdev->name);
4607
4608         rcu_read_lock();
4609
4610         list_for_each_entry_rcu(conn, &h->list, list) {
4611                 struct hci_chan *tmp;
4612
4613                 if (conn->type != type)
4614                         continue;
4615
4616                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4617                         continue;
4618
4619                 conn_num++;
4620
4621                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4622                         struct sk_buff *skb;
4623
4624                         if (skb_queue_empty(&tmp->data_q))
4625                                 continue;
4626
4627                         skb = skb_peek(&tmp->data_q);
4628                         if (skb->priority < cur_prio)
4629                                 continue;
4630
4631                         if (skb->priority > cur_prio) {
4632                                 num = 0;
4633                                 min = ~0;
4634                                 cur_prio = skb->priority;
4635                         }
4636
4637                         num++;
4638
4639                         if (conn->sent < min) {
4640                                 min  = conn->sent;
4641                                 chan = tmp;
4642                         }
4643                 }
4644
4645                 if (hci_conn_num(hdev, type) == conn_num)
4646                         break;
4647         }
4648
4649         rcu_read_unlock();
4650
4651         if (!chan)
4652                 return NULL;
4653
4654         switch (chan->conn->type) {
4655         case ACL_LINK:
4656                 cnt = hdev->acl_cnt;
4657                 break;
4658         case AMP_LINK:
4659                 cnt = hdev->block_cnt;
4660                 break;
4661         case SCO_LINK:
4662         case ESCO_LINK:
4663                 cnt = hdev->sco_cnt;
4664                 break;
4665         case LE_LINK:
4666                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4667                 break;
4668         default:
4669                 cnt = 0;
4670                 BT_ERR("Unknown link type");
4671         }
4672
4673         q = cnt / num;
4674         *quote = q ? q : 1;
4675         BT_DBG("chan %p quote %d", chan, *quote);
4676         return chan;
4677 }
4678
4679 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4680 {
4681         struct hci_conn_hash *h = &hdev->conn_hash;
4682         struct hci_conn *conn;
4683         int num = 0;
4684
4685         BT_DBG("%s", hdev->name);
4686
4687         rcu_read_lock();
4688
4689         list_for_each_entry_rcu(conn, &h->list, list) {
4690                 struct hci_chan *chan;
4691
4692                 if (conn->type != type)
4693                         continue;
4694
4695                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4696                         continue;
4697
4698                 num++;
4699
4700                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4701                         struct sk_buff *skb;
4702
4703                         if (chan->sent) {
4704                                 chan->sent = 0;
4705                                 continue;
4706                         }
4707
4708                         if (skb_queue_empty(&chan->data_q))
4709                                 continue;
4710
4711                         skb = skb_peek(&chan->data_q);
4712                         if (skb->priority >= HCI_PRIO_MAX - 1)
4713                                 continue;
4714
4715                         skb->priority = HCI_PRIO_MAX - 1;
4716
4717                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4718                                skb->priority);
4719                 }
4720
4721                 if (hci_conn_num(hdev, type) == num)
4722                         break;
4723         }
4724
4725         rcu_read_unlock();
4727 }
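
/* Starvation example (illustrative): a channel that still has queued
 * data but sent nothing in the last round (chan->sent == 0) gets the
 * head of its queue promoted to HCI_PRIO_MAX - 1, so hci_chan_sent()
 * will prefer it on the next pass instead of letting higher-priority
 * traffic monopolize the link.
 */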
4728
4729 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4730 {
4731         /* Calculate count of blocks used by this packet */
4732         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4733 }
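
/* Worked example (illustrative; the block length is hypothetical): for
 * block_len = 339 octets, a 1021-byte ACL packet carries
 * 1021 - 4 = 1017 payload bytes after the ACL header and thus occupies
 * DIV_ROUND_UP(1017, 339) = 3 controller buffer blocks.
 */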
4734
4735 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4736 {
4737         if (!test_bit(HCI_RAW, &hdev->flags)) {
4738                 /* ACL tx timeout must be longer than maximum
4739                  * link supervision timeout (40.9 seconds) */
4740                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4741                                        HCI_ACL_TX_TIMEOUT))
4742                         hci_link_tx_to(hdev, ACL_LINK);
4743         }
4744 }
4745
4746 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4747 {
4748         unsigned int cnt = hdev->acl_cnt;
4749         struct hci_chan *chan;
4750         struct sk_buff *skb;
4751         int quote;
4752
4753         __check_timeout(hdev, cnt);
4754
4755         while (hdev->acl_cnt &&
4756                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4757                 u32 priority = (skb_peek(&chan->data_q))->priority;
4758                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4759                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4760                                skb->len, skb->priority);
4761
4762                         /* Stop if priority has changed */
4763                         if (skb->priority < priority)
4764                                 break;
4765
4766                         skb = skb_dequeue(&chan->data_q);
4767
4768                         hci_conn_enter_active_mode(chan->conn,
4769                                                    bt_cb(skb)->force_active);
4770
4771                         hci_send_frame(hdev, skb);
4772                         hdev->acl_last_tx = jiffies;
4773
4774                         hdev->acl_cnt--;
4775                         chan->sent++;
4776                         chan->conn->sent++;
4777                 }
4778         }
4779
4780         if (cnt != hdev->acl_cnt)
4781                 hci_prio_recalculate(hdev, ACL_LINK);
4782 }
4783
4784 static void hci_sched_acl_blk(struct hci_dev *hdev)
4785 {
4786         unsigned int cnt = hdev->block_cnt;
4787         struct hci_chan *chan;
4788         struct sk_buff *skb;
4789         int quote;
4790         u8 type;
4791
4792         __check_timeout(hdev, cnt);
4793
4794         BT_DBG("%s", hdev->name);
4795
4796         if (hdev->dev_type == HCI_AMP)
4797                 type = AMP_LINK;
4798         else
4799                 type = ACL_LINK;
4800
4801         while (hdev->block_cnt > 0 &&
4802                (chan = hci_chan_sent(hdev, type, &quote))) {
4803                 u32 priority = (skb_peek(&chan->data_q))->priority;
4804                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4805                         int blocks;
4806
4807                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4808                                skb->len, skb->priority);
4809
4810                         /* Stop if priority has changed */
4811                         if (skb->priority < priority)
4812                                 break;
4813
4814                         skb = skb_dequeue(&chan->data_q);
4815
4816                         blocks = __get_blocks(hdev, skb);
4817                         if (blocks > hdev->block_cnt)
4818                                 return;
4819
4820                         hci_conn_enter_active_mode(chan->conn,
4821                                                    bt_cb(skb)->force_active);
4822
4823                         hci_send_frame(hdev, skb);
4824                         hdev->acl_last_tx = jiffies;
4825
4826                         hdev->block_cnt -= blocks;
4827                         quote -= blocks;
4828
4829                         chan->sent += blocks;
4830                         chan->conn->sent += blocks;
4831                 }
4832         }
4833
4834         if (cnt != hdev->block_cnt)
4835                 hci_prio_recalculate(hdev, type);
4836 }
4837
4838 static void hci_sched_acl(struct hci_dev *hdev)
4839 {
4840         BT_DBG("%s", hdev->name);
4841
4842         /* No ACL links to schedule on a BR/EDR controller */
4843         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4844                 return;
4845
4846         /* No AMP links to schedule on an AMP controller */
4847         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4848                 return;
4849
4850         switch (hdev->flow_ctl_mode) {
4851         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4852                 hci_sched_acl_pkt(hdev);
4853                 break;
4854
4855         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4856                 hci_sched_acl_blk(hdev);
4857                 break;
4858         }
4859 }
4860
4861 /* Schedule SCO */
4862 static void hci_sched_sco(struct hci_dev *hdev)
4863 {
4864         struct hci_conn *conn;
4865         struct sk_buff *skb;
4866         int quote;
4867
4868         BT_DBG("%s", hdev->name);
4869
4870         if (!hci_conn_num(hdev, SCO_LINK))
4871                 return;
4872
4873         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4874                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4875                         BT_DBG("skb %p len %d", skb, skb->len);
4876                         hci_send_frame(hdev, skb);
4877
4878                         conn->sent++;
4879                         if (conn->sent == ~0)
4880                                 conn->sent = 0;
4881                 }
4882         }
4883 }
4884
4885 static void hci_sched_esco(struct hci_dev *hdev)
4886 {
4887         struct hci_conn *conn;
4888         struct sk_buff *skb;
4889         int quote;
4890
4891         BT_DBG("%s", hdev->name);
4892
4893         if (!hci_conn_num(hdev, ESCO_LINK))
4894                 return;
4895
4896         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4897                                                      &quote))) {
4898                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4899                         BT_DBG("skb %p len %d", skb, skb->len);
4900                         hci_send_frame(hdev, skb);
4901
4902                         conn->sent++;
4903                         if (conn->sent == ~0)
4904                                 conn->sent = 0;
4905                 }
4906         }
4907 }
4908
4909 static void hci_sched_le(struct hci_dev *hdev)
4910 {
4911         struct hci_chan *chan;
4912         struct sk_buff *skb;
4913         int quote, cnt, tmp;
4914
4915         BT_DBG("%s", hdev->name);
4916
4917         if (!hci_conn_num(hdev, LE_LINK))
4918                 return;
4919
4920         if (!test_bit(HCI_RAW, &hdev->flags)) {
4921                 /* The LE TX timeout must be longer than the maximum
4922                  * link supervision timeout (40.9 seconds) */
4923                 if (!hdev->le_cnt && hdev->le_pkts &&
4924                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4925                         hci_link_tx_to(hdev, LE_LINK);
4926         }
4927
4928         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4929         tmp = cnt;
4930         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4931                 u32 priority = (skb_peek(&chan->data_q))->priority;
4932                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4933                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4934                                skb->len, skb->priority);
4935
4936                         /* Stop if priority has changed */
4937                         if (skb->priority < priority)
4938                                 break;
4939
4940                         skb = skb_dequeue(&chan->data_q);
4941
4942                         hci_send_frame(hdev, skb);
4943                         hdev->le_last_tx = jiffies;
4944
4945                         cnt--;
4946                         chan->sent++;
4947                         chan->conn->sent++;
4948                 }
4949         }
4950
4951         if (hdev->le_pkts)
4952                 hdev->le_cnt = cnt;
4953         else
4954                 hdev->acl_cnt = cnt;
4955
4956         if (cnt != tmp)
4957                 hci_prio_recalculate(hdev, LE_LINK);
4958 }
4959
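/* TX work handler: run the per-type schedulers (unless the device is
 * in user channel mode) and then flush any raw packets queued on raw_q.
 */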
4960 static void hci_tx_work(struct work_struct *work)
4961 {
4962         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4963         struct sk_buff *skb;
4964
4965         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4966                hdev->sco_cnt, hdev->le_cnt);
4967
4968         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4969                 /* Schedule queues and send frames to the HCI driver */
4970                 hci_sched_acl(hdev);
4971                 hci_sched_sco(hdev);
4972                 hci_sched_esco(hdev);
4973                 hci_sched_le(hdev);
4974         }
4975
4976         /* Send all queued raw (unknown type) packets */
4977         while ((skb = skb_dequeue(&hdev->raw_q)))
4978                 hci_send_frame(hdev, skb);
4979 }
4980
4981 /* ----- HCI RX task (incoming data processing) ----- */
4982
4983 /* ACL data packet */
4984 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4985 {
4986         struct hci_acl_hdr *hdr = (void *) skb->data;
4987         struct hci_conn *conn;
4988         __u16 handle, flags;
4989
4990         skb_pull(skb, HCI_ACL_HDR_SIZE);
4991
4992         handle = __le16_to_cpu(hdr->handle);
4993         flags  = hci_flags(handle);
4994         handle = hci_handle(handle);
4995
4996         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4997                handle, flags);
4998
4999         hdev->stat.acl_rx++;
5000
5001         hci_dev_lock(hdev);
5002         conn = hci_conn_hash_lookup_handle(hdev, handle);
5003         hci_dev_unlock(hdev);
5004
5005         if (conn) {
5006                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5007
5008                 /* Send to upper protocol */
5009                 l2cap_recv_acldata(conn, skb, flags);
5010                 return;
5011         } else {
5012                 BT_ERR("%s ACL packet for unknown connection handle %d",
5013                        hdev->name, handle);
5014         }
5015
5016         kfree_skb(skb);
5017 }
5018
5019 /* SCO data packet */
5020 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5021 {
5022         struct hci_sco_hdr *hdr = (void *) skb->data;
5023         struct hci_conn *conn;
5024         __u16 handle;
5025
5026         skb_pull(skb, HCI_SCO_HDR_SIZE);
5027
5028         handle = __le16_to_cpu(hdr->handle);
5029
5030         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5031
5032         hdev->stat.sco_rx++;
5033
5034         hci_dev_lock(hdev);
5035         conn = hci_conn_hash_lookup_handle(hdev, handle);
5036         hci_dev_unlock(hdev);
5037
5038         if (conn) {
5039                 /* Send to upper protocol */
5040                 sco_recv_scodata(conn, skb);
5041                 return;
5042         } else {
5043                 BT_ERR("%s SCO packet for unknown connection handle %d",
5044                        hdev->name, handle);
5045         }
5046
5047         kfree_skb(skb);
5048 }
5049
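/* A request is complete when the next command in the queue (if any)
 * is marked as the start of a new request.
 */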
5050 static bool hci_req_is_complete(struct hci_dev *hdev)
5051 {
5052         struct sk_buff *skb;
5053
5054         skb = skb_peek(&hdev->cmd_q);
5055         if (!skb)
5056                 return true;
5057
5058         return bt_cb(skb)->req.start;
5059 }
5060
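/* Requeue a clone of the last sent command so that it is sent again.
 * This is used to recover from controllers that generate a spontaneous
 * reset complete event during init (see hci_req_cmd_complete). A
 * pending HCI_OP_RESET is deliberately never resent.
 */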
5061 static void hci_resend_last(struct hci_dev *hdev)
5062 {
5063         struct hci_command_hdr *sent;
5064         struct sk_buff *skb;
5065         u16 opcode;
5066
5067         if (!hdev->sent_cmd)
5068                 return;
5069
5070         sent = (void *) hdev->sent_cmd->data;
5071         opcode = __le16_to_cpu(sent->opcode);
5072         if (opcode == HCI_OP_RESET)
5073                 return;
5074
5075         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5076         if (!skb)
5077                 return;
5078
5079         skb_queue_head(&hdev->cmd_q, skb);
5080         queue_work(hdev->workqueue, &hdev->cmd_work);
5081 }
5082
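/* Handle completion of the command identified by opcode within a
 * possibly multi-command request: look up the request's complete
 * callback and, once the request has finished or failed, drop any
 * remaining queued commands that belong to it.
 */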
5083 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5084 {
5085         hci_req_complete_t req_complete = NULL;
5086         struct sk_buff *skb;
5087         unsigned long flags;
5088
5089         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5090
5091         /* If the completed command doesn't match the last one that was
5092          * sent, we need to handle it specially.
5093          */
5094         if (!hci_sent_cmd_data(hdev, opcode)) {
5095                 /* Some CSR-based controllers generate a spontaneous
5096                  * reset complete event during init and any pending
5097                  * command will never be completed. In such a case we
5098                  * need to resend whatever was the last sent
5099                  * command.
5100                  */
5101                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5102                         hci_resend_last(hdev);
5103
5104                 return;
5105         }
5106
5107         /* If the command succeeded and there's still more commands in
5108          * this request the request is not yet complete.
5109          */
5110         if (!status && !hci_req_is_complete(hdev))
5111                 return;
5112
5113         /* If this was the last command in a request, the completion
5114          * callback is found in hdev->sent_cmd instead of the
5115          * command queue (hdev->cmd_q).
5116          */
5117         if (hdev->sent_cmd) {
5118                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5119
5120                 if (req_complete) {
5121                         /* We must set the complete callback to NULL to
5122                          * avoid calling the callback more than once if
5123                          * this function gets called again.
5124                          */
5125                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5126
5127                         goto call_complete;
5128                 }
5129         }
5130
5131         /* Remove all pending commands belonging to this request */
5132         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5133         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5134                 if (bt_cb(skb)->req.start) {
5135                         __skb_queue_head(&hdev->cmd_q, skb);
5136                         break;
5137                 }
5138
5139                 req_complete = bt_cb(skb)->req.complete;
5140                 kfree_skb(skb);
5141         }
5142         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5143
5144 call_complete:
5145         if (req_complete)
5146                 req_complete(hdev, status);
5147 }
5148
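/* RX work handler: drain rx_q, mirroring each frame to the monitor and
 * (in promiscuous mode) to raw sockets before handing it to the
 * matching packet handler, unless the device is in raw or user channel
 * mode.
 */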
5149 static void hci_rx_work(struct work_struct *work)
5150 {
5151         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5152         struct sk_buff *skb;
5153
5154         BT_DBG("%s", hdev->name);
5155
5156         while ((skb = skb_dequeue(&hdev->rx_q))) {
5157                 /* Send copy to monitor */
5158                 hci_send_to_monitor(hdev, skb);
5159
5160                 if (atomic_read(&hdev->promisc)) {
5161                         /* Send copy to the sockets */
5162                         hci_send_to_sock(hdev, skb);
5163                 }
5164
5165                 if (test_bit(HCI_RAW, &hdev->flags) ||
5166                     test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5167                         kfree_skb(skb);
5168                         continue;
5169                 }
5170
5171                 if (test_bit(HCI_INIT, &hdev->flags)) {
5172                         /* Don't process data packets in this state. */
5173                         switch (bt_cb(skb)->pkt_type) {
5174                         case HCI_ACLDATA_PKT:
5175                         case HCI_SCODATA_PKT:
5176                                 kfree_skb(skb);
5177                                 continue;
5178                         }
5179                 }
5180
5181                 /* Process frame */
5182                 switch (bt_cb(skb)->pkt_type) {
5183                 case HCI_EVENT_PKT:
5184                         BT_DBG("%s Event packet", hdev->name);
5185                         hci_event_packet(hdev, skb);
5186                         break;
5187
5188                 case HCI_ACLDATA_PKT:
5189                         BT_DBG("%s ACL data packet", hdev->name);
5190                         hci_acldata_packet(hdev, skb);
5191                         break;
5192
5193                 case HCI_SCODATA_PKT:
5194                         BT_DBG("%s SCO data packet", hdev->name);
5195                         hci_scodata_packet(hdev, skb);
5196                         break;
5197
5198                 default:
5199                         kfree_skb(skb);
5200                         break;
5201                 }
5202         }
5203 }
5204
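/* Command work handler: send the next queued command if the controller
 * still has command credits (cmd_cnt), keep a clone in sent_cmd for
 * completion matching, and (re)arm the command timeout unless a reset
 * is in progress.
 */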
5205 static void hci_cmd_work(struct work_struct *work)
5206 {
5207         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5208         struct sk_buff *skb;
5209
5210         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5211                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5212
5213         /* Send queued commands */
5214         if (atomic_read(&hdev->cmd_cnt)) {
5215                 skb = skb_dequeue(&hdev->cmd_q);
5216                 if (!skb)
5217                         return;
5218
5219                 kfree_skb(hdev->sent_cmd);
5220
5221                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5222                 if (hdev->sent_cmd) {
5223                         atomic_dec(&hdev->cmd_cnt);
5224                         hci_send_frame(hdev, skb);
5225                         if (test_bit(HCI_RESET, &hdev->flags))
5226                                 del_timer(&hdev->cmd_timer);
5227                         else
5228                                 mod_timer(&hdev->cmd_timer,
5229                                           jiffies + HCI_CMD_TIMEOUT);
5230                 } else {
5231                         skb_queue_head(&hdev->cmd_q, skb);
5232                         queue_work(hdev->workqueue, &hdev->cmd_work);
5233                 }
5234         }
5235 }
5236
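/* Append an LE scan disable command to the given request. */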
5237 void hci_req_add_le_scan_disable(struct hci_request *req)
5238 {
5239         struct hci_cp_le_set_scan_enable cp;
5240
5241         memset(&cp, 0, sizeof(cp));
5242         cp.enable = LE_SCAN_DISABLE;
5243         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5244 }
5245
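/* Append the commands for starting LE passive scanning (scan
 * parameters followed by scan enable) to the given request.
 */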
5246 void hci_req_add_le_passive_scan(struct hci_request *req)
5247 {
5248         struct hci_cp_le_set_scan_param param_cp;
5249         struct hci_cp_le_set_scan_enable enable_cp;
5250         struct hci_dev *hdev = req->hdev;
5251         u8 own_addr_type;
5252
5253         /* Set require_privacy to true to avoid identification by
5254          * unknown peer devices. Since this is passive scanning, no
5255          * SCAN_REQ using the local identity should be sent. Mandating
5256          * privacy is just an extra precaution.
5257          */
5258         if (hci_update_random_address(req, true, &own_addr_type))
5259                 return;
5260
5261         memset(&param_cp, 0, sizeof(param_cp));
5262         param_cp.type = LE_SCAN_PASSIVE;
5263         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5264         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5265         param_cp.own_address_type = own_addr_type;
5266         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5267                     &param_cp);
5268
5269         memset(&enable_cp, 0, sizeof(enable_cp));
5270         enable_cp.enable = LE_SCAN_ENABLE;
5271         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5272         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5273                     &enable_cp);
5274 }
5275
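/* Completion callback for the background scan update request; failures
 * are only logged since there is nothing else to do.
 */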
5276 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5277 {
5278         if (status)
5279                 BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
5280                        status);
5281 }
5282
5283 /* This function controls the background scanning based on the
5284  * hdev->pend_le_conns list. If there are pending LE connections, we start
5285  * the background scanning, otherwise we stop it.
5286  *
5287  * This function requires that the caller hold hdev->lock.
5288  */
5289 void hci_update_background_scan(struct hci_dev *hdev)
5290 {
5291         struct hci_request req;
5292         struct hci_conn *conn;
5293         int err;
5294
5295         hci_req_init(&req, hdev);
5296
5297         if (list_empty(&hdev->pend_le_conns)) {
5298                 /* If there are no pending LE connections, we should stop
5299                  * the background scanning.
5300                  */
5301
5302                 /* If the controller is not scanning, we are done. */
5303                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5304                         return;
5305
5306                 hci_req_add_le_scan_disable(&req);
5307
5308                 BT_DBG("%s stopping background scanning", hdev->name);
5309         } else {
5310                 /* If there is at least one pending LE connection, we should
5311                  * keep the background scan running.
5312                  */
5313
5314                 /* If the controller is connecting, we should not start scanning
5315                  * since some controllers are not able to scan and connect at
5316                  * the same time.
5317                  */
5318                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5319                 if (conn)
5320                         return;
5321
5322                 /* If the controller is currently scanning, we stop it to ensure we
5323                  * don't miss any advertising (due to the duplicates filter).
5324                  */
5325                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5326                         hci_req_add_le_scan_disable(&req);
5327
5328                 hci_req_add_le_passive_scan(&req);
5329
5330                 BT_DBG("%s starting background scanning", hdev->name);
5331         }
5332
5333         err = hci_req_run(&req, update_background_scan_complete);
5334         if (err)
5335                 BT_ERR("Failed to run HCI request: err %d", err);
5336 }