/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

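/* Writing "Y"/"N" toggles Device Under Test mode while the device is
 * up: enabling sends the Enable DUT Mode command, disabling resets the
 * controller, and the flag is only flipped if the command succeeds.
 */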
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct smp_irk *irk;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        rcu_read_unlock();

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct smp_ltk *ltk;

        rcu_read_lock();
        list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        rcu_read_unlock();

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int adv_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
                        adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
                        adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

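/* Fetch the most recently received event from hdev->recv_evt (consuming
 * it) and check that it is either the requested @event or a Command
 * Complete matching @opcode. Returns the skb or ERR_PTR(-ENODATA).
 */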
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

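/* Send a single HCI command and synchronously wait up to @timeout
 * jiffies for the matching event (or for Command Complete when @event
 * is 0). Returns the event skb or an ERR_PTR on failure.
 */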
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

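/* Like __hci_req_sync(), but refuses to run while the device is down
 * and takes the request lock to serialize against other requests.
 */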
static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

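/* Stage one of the controller init: perform an optional reset and then
 * the transport specific (BR/EDR or AMP) identification commands.
 */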
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

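/* Choose the best inquiry mode the controller can handle: 0x02 for
 * extended inquiry results, 0x01 for inquiry results with RSSI (also
 * forced for a few controllers that support it without advertising
 * the feature), and 0x00 for standard inquiry results otherwise.
 */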
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to
         * set any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * also be available. However, some controllers list
                 * max_page as 0 as long as SSP has not been enabled.
                 * To achieve proper debugging output, force max_page to
                 * a minimum of 1.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

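/* Advertise every link policy the controller actually supports in the
 * default link policy settings.
 */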
static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it
         * is marked as supported. If not supported, assume that the
         * controller does not have actual support for stored link keys,
         * which makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if ((lmp_sc_capable(hdev) ||
             test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
            test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

1746 static int __hci_init(struct hci_dev *hdev)
1747 {
1748         int err;
1749
1750         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1751         if (err < 0)
1752                 return err;
1753
1754         /* The Device Under Test (DUT) mode is special and available for
1755          * all controller types. So just create it early on.
1756          */
1757         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1758                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1759                                     &dut_mode_fops);
1760         }
1761
1762         /* The HCI_BREDR device type covers single-mode LE, single-mode
1763          * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
1764          * only need the first stage init.
1765          */
1766         if (hdev->dev_type != HCI_BREDR)
1767                 return 0;
1768
1769         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1770         if (err < 0)
1771                 return err;
1772
1773         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1774         if (err < 0)
1775                 return err;
1776
1777         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1778         if (err < 0)
1779                 return err;
1780
1781         /* Only create debugfs entries during the initial setup
1782          * phase and not every time the controller gets powered on.
1783          */
1784         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1785                 return 0;
1786
1787         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1788                             &features_fops);
1789         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1790                            &hdev->manufacturer);
1791         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1792         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1793         debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1794                             &device_list_fops);
1795         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1796                             &blacklist_fops);
1797         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1798
1799         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1800                             &conn_info_min_age_fops);
1801         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1802                             &conn_info_max_age_fops);
1803
1804         if (lmp_bredr_capable(hdev)) {
1805                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1806                                     hdev, &inquiry_cache_fops);
1807                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1808                                     hdev, &link_keys_fops);
1809                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1810                                     hdev, &dev_class_fops);
1811                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1812                                     hdev, &voice_setting_fops);
1813         }
1814
1815         if (lmp_ssp_capable(hdev)) {
1816                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1817                                     hdev, &auto_accept_delay_fops);
1818                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1819                                     hdev, &force_sc_support_fops);
1820                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1821                                     hdev, &sc_only_mode_fops);
1822         }
1823
1824         if (lmp_sniff_capable(hdev)) {
1825                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1826                                     hdev, &idle_timeout_fops);
1827                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1828                                     hdev, &sniff_min_interval_fops);
1829                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1830                                     hdev, &sniff_max_interval_fops);
1831         }
1832
1833         if (lmp_le_capable(hdev)) {
1834                 debugfs_create_file("identity", 0400, hdev->debugfs,
1835                                     hdev, &identity_fops);
1836                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1837                                     hdev, &rpa_timeout_fops);
1838                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1839                                     hdev, &random_address_fops);
1840                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1841                                     hdev, &static_address_fops);
1842
1843                 /* For controllers with a public address, provide a debug
1844                  * option to force the usage of the configured static
1845                  * address. By default the public address is used.
1846                  */
1847                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1848                         debugfs_create_file("force_static_address", 0644,
1849                                             hdev->debugfs, hdev,
1850                                             &force_static_address_fops);
1851
1852                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1853                                   &hdev->le_white_list_size);
1854                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1855                                     &white_list_fops);
1856                 debugfs_create_file("identity_resolving_keys", 0400,
1857                                     hdev->debugfs, hdev,
1858                                     &identity_resolving_keys_fops);
1859                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1860                                     hdev, &long_term_keys_fops);
1861                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1862                                     hdev, &conn_min_interval_fops);
1863                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1864                                     hdev, &conn_max_interval_fops);
1865                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1866                                     hdev, &conn_latency_fops);
1867                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1868                                     hdev, &supervision_timeout_fops);
1869                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1870                                     hdev, &adv_channel_map_fops);
1871                 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1872                                     hdev, &adv_min_interval_fops);
1873                 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1874                                     hdev, &adv_max_interval_fops);
1875                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1876                                    hdev->debugfs,
1877                                    &hdev->discov_interleaved_timeout);
1878
1879                 smp_register(hdev);
1880         }
1881
1882         return 0;
1883 }
1884
1885 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1886 {
1887         struct hci_dev *hdev = req->hdev;
1888
1889         BT_DBG("%s %ld", hdev->name, opt);
1890
1891         /* Reset */
1892         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1893                 hci_reset_req(req, 0);
1894
1895         /* Read Local Version */
1896         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1897
1898         /* Read BD Address */
1899         if (hdev->set_bdaddr)
1900                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1901 }
1902
1903 static int __hci_unconf_init(struct hci_dev *hdev)
1904 {
1905         int err;
1906
1907         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1908                 return 0;
1909
1910         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1911         if (err < 0)
1912                 return err;
1913
1914         return 0;
1915 }
1916
1917 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1918 {
1919         __u8 scan = opt;
1920
1921         BT_DBG("%s %x", req->hdev->name, scan);
1922
1923         /* Inquiry and Page scans */
1924         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1925 }
1926
1927 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1928 {
1929         __u8 auth = opt;
1930
1931         BT_DBG("%s %x", req->hdev->name, auth);
1932
1933         /* Authentication */
1934         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1935 }
1936
1937 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1938 {
1939         __u8 encrypt = opt;
1940
1941         BT_DBG("%s %x", req->hdev->name, encrypt);
1942
1943         /* Encryption */
1944         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1945 }
1946
1947 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1948 {
1949         __le16 policy = cpu_to_le16(opt);
1950
1951         BT_DBG("%s %x", req->hdev->name, policy);
1952
1953         /* Default link policy */
1954         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1955 }
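
/* These one-shot request builders are run synchronously from the ioctl
 * path via hci_req_sync(). For example (sketch, mirroring the
 * HCISETSCAN handling in hci_dev_cmd() below):
 *
 *      err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *                         HCI_INIT_TIMEOUT);
 */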
1956
1957 /* Get HCI device by index.
1958  * Device is held on return. */
1959 struct hci_dev *hci_dev_get(int index)
1960 {
1961         struct hci_dev *hdev = NULL, *d;
1962
1963         BT_DBG("%d", index);
1964
1965         if (index < 0)
1966                 return NULL;
1967
1968         read_lock(&hci_dev_list_lock);
1969         list_for_each_entry(d, &hci_dev_list, list) {
1970                 if (d->id == index) {
1971                         hdev = hci_dev_hold(d);
1972                         break;
1973                 }
1974         }
1975         read_unlock(&hci_dev_list_lock);
1976         return hdev;
1977 }
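
/* Hypothetical caller (the example_* name is editorial, not part of
 * this file) showing the reference discipline documented above: every
 * successful hci_dev_get() must be balanced by hci_dev_put().
 */
static int example_touch_hdev(int index)
{
        struct hci_dev *hdev = hci_dev_get(index);

        if (!hdev)
                return -ENODEV;

        BT_DBG("%s", hdev->name);

        hci_dev_put(hdev);
        return 0;
}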
1978
1979 /* ---- Inquiry support ---- */
1980
1981 bool hci_discovery_active(struct hci_dev *hdev)
1982 {
1983         struct discovery_state *discov = &hdev->discovery;
1984
1985         switch (discov->state) {
1986         case DISCOVERY_FINDING:
1987         case DISCOVERY_RESOLVING:
1988                 return true;
1989
1990         default:
1991                 return false;
1992         }
1993 }
1994
1995 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1996 {
1997         int old_state = hdev->discovery.state;
1998
1999         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2000
2001         if (old_state == state)
2002                 return;
2003
2004         hdev->discovery.state = state;
2005
2006         switch (state) {
2007         case DISCOVERY_STOPPED:
2008                 hci_update_background_scan(hdev);
2009
2010                 if (old_state != DISCOVERY_STARTING)
2011                         mgmt_discovering(hdev, 0);
2012                 break;
2013         case DISCOVERY_STARTING:
2014                 break;
2015         case DISCOVERY_FINDING:
2016                 mgmt_discovering(hdev, 1);
2017                 break;
2018         case DISCOVERY_RESOLVING:
2019                 break;
2020         case DISCOVERY_STOPPING:
2021                 break;
2022         }
2023 }
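
/* For reference, the usual mgmt-driven state flow is:
 * STOPPED -> STARTING -> FINDING [-> RESOLVING] -> STOPPING -> STOPPED,
 * and the "discovering" mgmt events are only emitted on the
 * transitions handled above.
 */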
2024
2025 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2026 {
2027         struct discovery_state *cache = &hdev->discovery;
2028         struct inquiry_entry *p, *n;
2029
2030         list_for_each_entry_safe(p, n, &cache->all, all) {
2031                 list_del(&p->all);
2032                 kfree(p);
2033         }
2034
2035         INIT_LIST_HEAD(&cache->unknown);
2036         INIT_LIST_HEAD(&cache->resolve);
2037 }
2038
2039 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2040                                                bdaddr_t *bdaddr)
2041 {
2042         struct discovery_state *cache = &hdev->discovery;
2043         struct inquiry_entry *e;
2044
2045         BT_DBG("cache %p, %pMR", cache, bdaddr);
2046
2047         list_for_each_entry(e, &cache->all, all) {
2048                 if (!bacmp(&e->data.bdaddr, bdaddr))
2049                         return e;
2050         }
2051
2052         return NULL;
2053 }
2054
2055 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2056                                                        bdaddr_t *bdaddr)
2057 {
2058         struct discovery_state *cache = &hdev->discovery;
2059         struct inquiry_entry *e;
2060
2061         BT_DBG("cache %p, %pMR", cache, bdaddr);
2062
2063         list_for_each_entry(e, &cache->unknown, list) {
2064                 if (!bacmp(&e->data.bdaddr, bdaddr))
2065                         return e;
2066         }
2067
2068         return NULL;
2069 }
2070
2071 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2072                                                        bdaddr_t *bdaddr,
2073                                                        int state)
2074 {
2075         struct discovery_state *cache = &hdev->discovery;
2076         struct inquiry_entry *e;
2077
2078         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2079
2080         list_for_each_entry(e, &cache->resolve, list) {
2081                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2082                         return e;
2083                 if (!bacmp(&e->data.bdaddr, bdaddr))
2084                         return e;
2085         }
2086
2087         return NULL;
2088 }
2089
2090 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2091                                       struct inquiry_entry *ie)
2092 {
2093         struct discovery_state *cache = &hdev->discovery;
2094         struct list_head *pos = &cache->resolve;
2095         struct inquiry_entry *p;
2096
2097         list_del(&ie->list);
2098
2099         list_for_each_entry(p, &cache->resolve, list) {
2100                 if (p->name_state != NAME_PENDING &&
2101                     abs(p->data.rssi) >= abs(ie->data.rssi))
2102                         break;
2103                 pos = &p->list;
2104         }
2105
2106         list_add(&ie->list, pos);
2107 }
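
/* Worked example of the ordering above (RSSI values illustrative):
 * with a resolve list holding entries at -40, -60 and -75 dBm, an
 * entry updated to -55 is re-inserted between -40 and -60. The list is
 * therefore kept sorted by ascending abs(rssi) so that names of the
 * strongest devices get resolved first, and the new entry is never
 * inserted ahead of one already in NAME_PENDING state.
 */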
2108
2109 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2110                              bool name_known)
2111 {
2112         struct discovery_state *cache = &hdev->discovery;
2113         struct inquiry_entry *ie;
2114         u32 flags = 0;
2115
2116         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2117
2118         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2119
2120         if (!data->ssp_mode)
2121                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2122
2123         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2124         if (ie) {
2125                 if (!ie->data.ssp_mode)
2126                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2127
2128                 if (ie->name_state == NAME_NEEDED &&
2129                     data->rssi != ie->data.rssi) {
2130                         ie->data.rssi = data->rssi;
2131                         hci_inquiry_cache_update_resolve(hdev, ie);
2132                 }
2133
2134                 goto update;
2135         }
2136
2137         /* Entry not in the cache. Add new one. */
2138         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2139         if (!ie) {
2140                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2141                 goto done;
2142         }
2143
2144         list_add(&ie->all, &cache->all);
2145
2146         if (name_known) {
2147                 ie->name_state = NAME_KNOWN;
2148         } else {
2149                 ie->name_state = NAME_NOT_KNOWN;
2150                 list_add(&ie->list, &cache->unknown);
2151         }
2152
2153 update:
2154         if (name_known && ie->name_state != NAME_KNOWN &&
2155             ie->name_state != NAME_PENDING) {
2156                 ie->name_state = NAME_KNOWN;
2157                 list_del(&ie->list);
2158         }
2159
2160         memcpy(&ie->data, data, sizeof(*data));
2161         ie->timestamp = jiffies;
2162         cache->timestamp = jiffies;
2163
2164         if (ie->name_state == NAME_NOT_KNOWN)
2165                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2166
2167 done:
2168         return flags;
2169 }
2170
2171 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2172 {
2173         struct discovery_state *cache = &hdev->discovery;
2174         struct inquiry_info *info = (struct inquiry_info *) buf;
2175         struct inquiry_entry *e;
2176         int copied = 0;
2177
2178         list_for_each_entry(e, &cache->all, all) {
2179                 struct inquiry_data *data = &e->data;
2180
2181                 if (copied >= num)
2182                         break;
2183
2184                 bacpy(&info->bdaddr, &data->bdaddr);
2185                 info->pscan_rep_mode    = data->pscan_rep_mode;
2186                 info->pscan_period_mode = data->pscan_period_mode;
2187                 info->pscan_mode        = data->pscan_mode;
2188                 memcpy(info->dev_class, data->dev_class, 3);
2189                 info->clock_offset      = data->clock_offset;
2190
2191                 info++;
2192                 copied++;
2193         }
2194
2195         BT_DBG("cache %p, copied %d", cache, copied);
2196         return copied;
2197 }
2198
2199 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2200 {
2201         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2202         struct hci_dev *hdev = req->hdev;
2203         struct hci_cp_inquiry cp;
2204
2205         BT_DBG("%s", hdev->name);
2206
2207         if (test_bit(HCI_INQUIRY, &hdev->flags))
2208                 return;
2209
2210         /* Start Inquiry */
2211         memcpy(&cp.lap, &ir->lap, 3);
2212         cp.length  = ir->length;
2213         cp.num_rsp = ir->num_rsp;
2214         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2215 }
2216
2217 int hci_inquiry(void __user *arg)
2218 {
2219         __u8 __user *ptr = arg;
2220         struct hci_inquiry_req ir;
2221         struct hci_dev *hdev;
2222         int err = 0, do_inquiry = 0, max_rsp;
2223         long timeo;
2224         __u8 *buf;
2225
2226         if (copy_from_user(&ir, ptr, sizeof(ir)))
2227                 return -EFAULT;
2228
2229         hdev = hci_dev_get(ir.dev_id);
2230         if (!hdev)
2231                 return -ENODEV;
2232
2233         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2234                 err = -EBUSY;
2235                 goto done;
2236         }
2237
2238         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2239                 err = -EOPNOTSUPP;
2240                 goto done;
2241         }
2242
2243         if (hdev->dev_type != HCI_BREDR) {
2244                 err = -EOPNOTSUPP;
2245                 goto done;
2246         }
2247
2248         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2249                 err = -EOPNOTSUPP;
2250                 goto done;
2251         }
2252
2253         hci_dev_lock(hdev);
2254         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2255             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2256                 hci_inquiry_cache_flush(hdev);
2257                 do_inquiry = 1;
2258         }
2259         hci_dev_unlock(hdev);
2260
2261         timeo = ir.length * msecs_to_jiffies(2000);
2262
2263         if (do_inquiry) {
2264                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2265                                    timeo);
2266                 if (err < 0)
2267                         goto done;
2268
2269                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2270                  * cleared). If it is interrupted by a signal, return -EINTR.
2271                  */
2272                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2273                                 TASK_INTERRUPTIBLE))
2274                         return -EINTR;
2275         }
2276
2277         /* For an unlimited number of responses, use a buffer with
2278          * 255 entries.
2279          */
2280         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2281
2282         /* inquiry_cache_dump() can't sleep, so allocate a temporary
2283          * buffer and then copy it to user space.
2284          */
2285         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2286         if (!buf) {
2287                 err = -ENOMEM;
2288                 goto done;
2289         }
2290
2291         hci_dev_lock(hdev);
2292         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2293         hci_dev_unlock(hdev);
2294
2295         BT_DBG("num_rsp %d", ir.num_rsp);
2296
2297         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2298                 ptr += sizeof(ir);
2299                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2300                                  ir.num_rsp))
2301                         err = -EFAULT;
2302         } else
2303                 err = -EFAULT;
2304
2305         kfree(buf);
2306
2307 done:
2308         hci_dev_put(hdev);
2309         return err;
2310 }
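
/* Minimal userspace sketch of driving the ioctl above (assumptions:
 * the userspace headers <sys/ioctl.h>, <sys/socket.h>, <stdlib.h>,
 * <bluetooth/bluetooth.h> and <bluetooth/hci.h>; error handling
 * trimmed; example_inquiry() is editorial). The buffer layout --
 * struct hci_inquiry_req followed by an inquiry_info array -- matches
 * what hci_inquiry() copies back to the caller.
 */
int example_inquiry(int dev_id)
{
        size_t size = sizeof(struct hci_inquiry_req) +
                      255 * sizeof(struct inquiry_info);
        struct hci_inquiry_req *ir = calloc(1, size);
        int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

        if (!ir || dd < 0)
                return -1;

        ir->dev_id  = dev_id;
        ir->flags   = IREQ_CACHE_FLUSH;
        ir->lap[0]  = 0x33;             /* GIAC 0x9e8b33, little endian */
        ir->lap[1]  = 0x8b;
        ir->lap[2]  = 0x9e;
        ir->length  = 8;                /* 8 * 1.28 seconds */
        ir->num_rsp = 255;

        return ioctl(dd, HCIINQUIRY, (unsigned long) ir);
}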
2311
2312 static int hci_dev_do_open(struct hci_dev *hdev)
2313 {
2314         int ret = 0;
2315
2316         BT_DBG("%s %p", hdev->name, hdev);
2317
2318         hci_req_lock(hdev);
2319
2320         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2321                 ret = -ENODEV;
2322                 goto done;
2323         }
2324
2325         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2326             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2327                 /* Check for rfkill but allow the HCI setup stage to
2328                  * proceed (which in itself doesn't cause any RF activity).
2329                  */
2330                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2331                         ret = -ERFKILL;
2332                         goto done;
2333                 }
2334
2335                 /* Check for valid public address or a configured static
2336          * random address, but let the HCI setup proceed to
2337                  * be able to determine if there is a public address
2338                  * or not.
2339                  *
2340                  * In case of user channel usage, it is not important
2341                  * if a public address or static random address is
2342                  * available.
2343                  *
2344                  * This check is only valid for BR/EDR controllers
2345                  * since AMP controllers do not have an address.
2346                  */
2347                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2348                     hdev->dev_type == HCI_BREDR &&
2349                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2350                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2351                         ret = -EADDRNOTAVAIL;
2352                         goto done;
2353                 }
2354         }
2355
2356         if (test_bit(HCI_UP, &hdev->flags)) {
2357                 ret = -EALREADY;
2358                 goto done;
2359         }
2360
2361         if (hdev->open(hdev)) {
2362                 ret = -EIO;
2363                 goto done;
2364         }
2365
2366         atomic_set(&hdev->cmd_cnt, 1);
2367         set_bit(HCI_INIT, &hdev->flags);
2368
2369         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2370                 if (hdev->setup)
2371                         ret = hdev->setup(hdev);
2372
2373                 /* The transport driver can set these quirks before
2374                  * creating the HCI device or in its setup callback.
2375                  *
2376                  * In case any of them is set, the controller has to
2377                  * start up as unconfigured.
2378                  */
2379                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2380                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2381                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2382
2383                 /* For an unconfigured controller it is required to
2384                  * read at least the version information provided by
2385                  * the Read Local Version Information command.
2386                  *
2387                  * If the set_bdaddr driver callback is provided, then
2388                  * also the original Bluetooth public device address
2389                  * will be read using the Read BD Address command.
2390                  */
2391                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2392                         ret = __hci_unconf_init(hdev);
2393         }
2394
2395         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2396                 /* If public address change is configured, ensure that
2397                  * the address gets programmed. If the driver does not
2398                  * support changing the public address, fail the power
2399                  * on procedure.
2400                  */
2401                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2402                     hdev->set_bdaddr)
2403                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2404                 else
2405                         ret = -EADDRNOTAVAIL;
2406         }
2407
2408         if (!ret) {
2409                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2410                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2411                         ret = __hci_init(hdev);
2412         }
2413
2414         clear_bit(HCI_INIT, &hdev->flags);
2415
2416         if (!ret) {
2417                 hci_dev_hold(hdev);
2418                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2419                 set_bit(HCI_UP, &hdev->flags);
2420                 hci_notify(hdev, HCI_DEV_UP);
2421                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2422                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2423                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2424                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2425                     hdev->dev_type == HCI_BREDR) {
2426                         hci_dev_lock(hdev);
2427                         mgmt_powered(hdev, 1);
2428                         hci_dev_unlock(hdev);
2429                 }
2430         } else {
2431                 /* Init failed, cleanup */
2432                 flush_work(&hdev->tx_work);
2433                 flush_work(&hdev->cmd_work);
2434                 flush_work(&hdev->rx_work);
2435
2436                 skb_queue_purge(&hdev->cmd_q);
2437                 skb_queue_purge(&hdev->rx_q);
2438
2439                 if (hdev->flush)
2440                         hdev->flush(hdev);
2441
2442                 if (hdev->sent_cmd) {
2443                         kfree_skb(hdev->sent_cmd);
2444                         hdev->sent_cmd = NULL;
2445                 }
2446
2447                 hdev->close(hdev);
2448                 hdev->flags &= BIT(HCI_RAW);
2449         }
2450
2451 done:
2452         hci_req_unlock(hdev);
2453         return ret;
2454 }
2455
2456 /* ---- HCI ioctl helpers ---- */
2457
2458 int hci_dev_open(__u16 dev)
2459 {
2460         struct hci_dev *hdev;
2461         int err;
2462
2463         hdev = hci_dev_get(dev);
2464         if (!hdev)
2465                 return -ENODEV;
2466
2467         /* Devices that are marked as unconfigured can only be powered
2468          * up as user channel. Trying to bring them up as normal devices
2469          * will result in a failure. Only user channel operation is
2470          * possible.
2471          *
2472          * When this function is called for a user channel, the flag
2473          * HCI_USER_CHANNEL will be set first before attempting to
2474          * open the device.
2475          */
2476         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2477             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2478                 err = -EOPNOTSUPP;
2479                 goto done;
2480         }
2481
2482         /* We need to ensure that no other power on/off work is pending
2483          * before proceeding to call hci_dev_do_open. This is
2484          * particularly important if the setup procedure has not yet
2485          * completed.
2486          */
2487         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2488                 cancel_delayed_work(&hdev->power_off);
2489
2490         /* After this call it is guaranteed that the setup procedure
2491          * has finished. This means that error conditions like RFKILL
2492          * or no valid public or static random address apply.
2493          */
2494         flush_workqueue(hdev->req_workqueue);
2495
2496         /* For controllers not using the management interface and that
2497          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2498          * so that pairing works for them. Once the management interface
2499          * is in use this bit will be cleared again and userspace has
2500          * to explicitly enable it.
2501          */
2502         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2503             !test_bit(HCI_MGMT, &hdev->dev_flags))
2504                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2505
2506         err = hci_dev_do_open(hdev);
2507
2508 done:
2509         hci_dev_put(hdev);
2510         return err;
2511 }
2512
2513 /* This function requires the caller holds hdev->lock */
2514 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2515 {
2516         struct hci_conn_params *p;
2517
2518         list_for_each_entry(p, &hdev->le_conn_params, list) {
2519                 if (p->conn) {
2520                         hci_conn_drop(p->conn);
2521                         hci_conn_put(p->conn);
2522                         p->conn = NULL;
2523                 }
2524                 list_del_init(&p->action);
2525         }
2526
2527         BT_DBG("All LE pending actions cleared");
2528 }
2529
2530 static int hci_dev_do_close(struct hci_dev *hdev)
2531 {
2532         BT_DBG("%s %p", hdev->name, hdev);
2533
2534         cancel_delayed_work(&hdev->power_off);
2535
2536         hci_req_cancel(hdev, ENODEV);
2537         hci_req_lock(hdev);
2538
2539         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2540                 cancel_delayed_work_sync(&hdev->cmd_timer);
2541                 hci_req_unlock(hdev);
2542                 return 0;
2543         }
2544
2545         /* Flush RX and TX works */
2546         flush_work(&hdev->tx_work);
2547         flush_work(&hdev->rx_work);
2548
2549         if (hdev->discov_timeout > 0) {
2550                 cancel_delayed_work(&hdev->discov_off);
2551                 hdev->discov_timeout = 0;
2552                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2553                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2554         }
2555
2556         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2557                 cancel_delayed_work(&hdev->service_cache);
2558
2559         cancel_delayed_work_sync(&hdev->le_scan_disable);
2560
2561         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2562                 cancel_delayed_work_sync(&hdev->rpa_expired);
2563
2564         /* Avoid potential lockdep warnings from the *_flush() calls by
2565          * ensuring the workqueue is empty up front.
2566          */
2567         drain_workqueue(hdev->workqueue);
2568
2569         hci_dev_lock(hdev);
2570         hci_inquiry_cache_flush(hdev);
2571         hci_pend_le_actions_clear(hdev);
2572         hci_conn_hash_flush(hdev);
2573         hci_dev_unlock(hdev);
2574
2575         hci_notify(hdev, HCI_DEV_DOWN);
2576
2577         if (hdev->flush)
2578                 hdev->flush(hdev);
2579
2580         /* Reset device */
2581         skb_queue_purge(&hdev->cmd_q);
2582         atomic_set(&hdev->cmd_cnt, 1);
2583         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2584             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2585             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2586                 set_bit(HCI_INIT, &hdev->flags);
2587                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2588                 clear_bit(HCI_INIT, &hdev->flags);
2589         }
2590
2591         /* Flush cmd work */
2592         flush_work(&hdev->cmd_work);
2593
2594         /* Drop queues */
2595         skb_queue_purge(&hdev->rx_q);
2596         skb_queue_purge(&hdev->cmd_q);
2597         skb_queue_purge(&hdev->raw_q);
2598
2599         /* Drop last sent command */
2600         if (hdev->sent_cmd) {
2601                 cancel_delayed_work_sync(&hdev->cmd_timer);
2602                 kfree_skb(hdev->sent_cmd);
2603                 hdev->sent_cmd = NULL;
2604         }
2605
2606         kfree_skb(hdev->recv_evt);
2607         hdev->recv_evt = NULL;
2608
2609         /* After this point our queues are empty
2610          * and no tasks are scheduled. */
2611         hdev->close(hdev);
2612
2613         /* Clear flags */
2614         hdev->flags &= BIT(HCI_RAW);
2615         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2616
2617         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2618                 if (hdev->dev_type == HCI_BREDR) {
2619                         hci_dev_lock(hdev);
2620                         mgmt_powered(hdev, 0);
2621                         hci_dev_unlock(hdev);
2622                 }
2623         }
2624
2625         /* Controller radio is available but is currently powered down */
2626         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2627
2628         memset(hdev->eir, 0, sizeof(hdev->eir));
2629         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2630         bacpy(&hdev->random_addr, BDADDR_ANY);
2631
2632         hci_req_unlock(hdev);
2633
2634         hci_dev_put(hdev);
2635         return 0;
2636 }
2637
2638 int hci_dev_close(__u16 dev)
2639 {
2640         struct hci_dev *hdev;
2641         int err;
2642
2643         hdev = hci_dev_get(dev);
2644         if (!hdev)
2645                 return -ENODEV;
2646
2647         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2648                 err = -EBUSY;
2649                 goto done;
2650         }
2651
2652         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2653                 cancel_delayed_work(&hdev->power_off);
2654
2655         err = hci_dev_do_close(hdev);
2656
2657 done:
2658         hci_dev_put(hdev);
2659         return err;
2660 }
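
/* Userspace sketch of the legacy ioctl path into hci_dev_open() and
 * hci_dev_close() above, equivalent to "hciconfig hciX up/down"
 * (assumptions: the same userspace headers as in the inquiry example;
 * the example_* name is editorial):
 */
int example_dev_power(int dev_id, int up)
{
        int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

        if (ctl < 0)
                return -1;

        return ioctl(ctl, up ? HCIDEVUP : HCIDEVDOWN, dev_id);
}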
2661
2662 int hci_dev_reset(__u16 dev)
2663 {
2664         struct hci_dev *hdev;
2665         int ret = 0;
2666
2667         hdev = hci_dev_get(dev);
2668         if (!hdev)
2669                 return -ENODEV;
2670
2671         hci_req_lock(hdev);
2672
2673         if (!test_bit(HCI_UP, &hdev->flags)) {
2674                 ret = -ENETDOWN;
2675                 goto done;
2676         }
2677
2678         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2679                 ret = -EBUSY;
2680                 goto done;
2681         }
2682
2683         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2684                 ret = -EOPNOTSUPP;
2685                 goto done;
2686         }
2687
2688         /* Drop queues */
2689         skb_queue_purge(&hdev->rx_q);
2690         skb_queue_purge(&hdev->cmd_q);
2691
2692         /* Avoid potential lockdep warnings from the *_flush() calls by
2693          * ensuring the workqueue is empty up front.
2694          */
2695         drain_workqueue(hdev->workqueue);
2696
2697         hci_dev_lock(hdev);
2698         hci_inquiry_cache_flush(hdev);
2699         hci_conn_hash_flush(hdev);
2700         hci_dev_unlock(hdev);
2701
2702         if (hdev->flush)
2703                 hdev->flush(hdev);
2704
2705         atomic_set(&hdev->cmd_cnt, 1);
2706         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2707
2708         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2709
2710 done:
2711         hci_req_unlock(hdev);
2712         hci_dev_put(hdev);
2713         return ret;
2714 }
2715
2716 int hci_dev_reset_stat(__u16 dev)
2717 {
2718         struct hci_dev *hdev;
2719         int ret = 0;
2720
2721         hdev = hci_dev_get(dev);
2722         if (!hdev)
2723                 return -ENODEV;
2724
2725         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2726                 ret = -EBUSY;
2727                 goto done;
2728         }
2729
2730         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2731                 ret = -EOPNOTSUPP;
2732                 goto done;
2733         }
2734
2735         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2736
2737 done:
2738         hci_dev_put(hdev);
2739         return ret;
2740 }
2741
2742 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2743 {
2744         bool conn_changed, discov_changed;
2745
2746         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2747
2748         if ((scan & SCAN_PAGE))
2749                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2750                                                  &hdev->dev_flags);
2751         else
2752                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2753                                                   &hdev->dev_flags);
2754
2755         if ((scan & SCAN_INQUIRY)) {
2756                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2757                                                    &hdev->dev_flags);
2758         } else {
2759                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2760                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2761                                                     &hdev->dev_flags);
2762         }
2763
2764         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2765                 return;
2766
2767         if (conn_changed || discov_changed) {
2768                 /* In case this was disabled through mgmt */
2769                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2770
2771                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2772                         mgmt_update_adv_data(hdev);
2773
2774                 mgmt_new_settings(hdev);
2775         }
2776 }
2777
2778 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2779 {
2780         struct hci_dev *hdev;
2781         struct hci_dev_req dr;
2782         int err = 0;
2783
2784         if (copy_from_user(&dr, arg, sizeof(dr)))
2785                 return -EFAULT;
2786
2787         hdev = hci_dev_get(dr.dev_id);
2788         if (!hdev)
2789                 return -ENODEV;
2790
2791         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2792                 err = -EBUSY;
2793                 goto done;
2794         }
2795
2796         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2797                 err = -EOPNOTSUPP;
2798                 goto done;
2799         }
2800
2801         if (hdev->dev_type != HCI_BREDR) {
2802                 err = -EOPNOTSUPP;
2803                 goto done;
2804         }
2805
2806         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2807                 err = -EOPNOTSUPP;
2808                 goto done;
2809         }
2810
2811         switch (cmd) {
2812         case HCISETAUTH:
2813                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2814                                    HCI_INIT_TIMEOUT);
2815                 break;
2816
2817         case HCISETENCRYPT:
2818                 if (!lmp_encrypt_capable(hdev)) {
2819                         err = -EOPNOTSUPP;
2820                         break;
2821                 }
2822
2823                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2824                         /* Auth must be enabled first */
2825                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2826                                            HCI_INIT_TIMEOUT);
2827                         if (err)
2828                                 break;
2829                 }
2830
2831                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2832                                    HCI_INIT_TIMEOUT);
2833                 break;
2834
2835         case HCISETSCAN:
2836                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2837                                    HCI_INIT_TIMEOUT);
2838
2839                 /* Ensure that the connectable and discoverable states
2840                  * get correctly modified as this was a non-mgmt change.
2841                  */
2842                 if (!err)
2843                         hci_update_scan_state(hdev, dr.dev_opt);
2844                 break;
2845
2846         case HCISETLINKPOL:
2847                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2848                                    HCI_INIT_TIMEOUT);
2849                 break;
2850
2851         case HCISETLINKMODE:
2852                 hdev->link_mode = ((__u16) dr.dev_opt) &
2853                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2854                 break;
2855
2856         case HCISETPTYPE:
2857                 hdev->pkt_type = (__u16) dr.dev_opt;
2858                 break;
2859
2860         case HCISETACLMTU:
2861                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2862                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2863                 break;
2864
2865         case HCISETSCOMTU:
2866                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2867                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2868                 break;
2869
2870         default:
2871                 err = -EINVAL;
2872                 break;
2873         }
2874
2875 done:
2876         hci_dev_put(hdev);
2877         return err;
2878 }
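
/* Userspace sketch of HCISETSCAN, roughly what "hciconfig hciX piscan"
 * issues, exercising the hci_update_scan_state() path above
 * (assumptions as in the previous sketches):
 */
int example_set_piscan(int ctl, int dev_id)
{
        struct hci_dev_req dr = {
                .dev_id  = dev_id,
                .dev_opt = SCAN_PAGE | SCAN_INQUIRY,
        };

        return ioctl(ctl, HCISETSCAN, (unsigned long) &dr);
}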
2879
2880 int hci_get_dev_list(void __user *arg)
2881 {
2882         struct hci_dev *hdev;
2883         struct hci_dev_list_req *dl;
2884         struct hci_dev_req *dr;
2885         int n = 0, size, err;
2886         __u16 dev_num;
2887
2888         if (get_user(dev_num, (__u16 __user *) arg))
2889                 return -EFAULT;
2890
2891         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2892                 return -EINVAL;
2893
2894         size = sizeof(*dl) + dev_num * sizeof(*dr);
2895
2896         dl = kzalloc(size, GFP_KERNEL);
2897         if (!dl)
2898                 return -ENOMEM;
2899
2900         dr = dl->dev_req;
2901
2902         read_lock(&hci_dev_list_lock);
2903         list_for_each_entry(hdev, &hci_dev_list, list) {
2904                 unsigned long flags = hdev->flags;
2905
2906                 /* When auto-off is configured, the transport is running,
2907                  * but in that case still indicate that the device is
2908                  * actually down.
2909                  */
2910                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2911                         flags &= ~BIT(HCI_UP);
2912
2913                 (dr + n)->dev_id  = hdev->id;
2914                 (dr + n)->dev_opt = flags;
2915
2916                 if (++n >= dev_num)
2917                         break;
2918         }
2919         read_unlock(&hci_dev_list_lock);
2920
2921         dl->dev_num = n;
2922         size = sizeof(*dl) + n * sizeof(*dr);
2923
2924         err = copy_to_user(arg, dl, size);
2925         kfree(dl);
2926
2927         return err ? -EFAULT : 0;
2928 }
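
/* Userspace counterpart (sketch; assumptions as before, HCI_MAX_DEV
 * being the kernel's device limit):
 */
int example_list_devices(int ctl)
{
        struct hci_dev_list_req *dl;
        int err;

        dl = calloc(1, sizeof(*dl) +
                    HCI_MAX_DEV * sizeof(struct hci_dev_req));
        if (!dl)
                return -1;

        dl->dev_num = HCI_MAX_DEV;
        err = ioctl(ctl, HCIGETDEVLIST, (void *) dl);
        /* on success, dl->dev_req holds dl->dev_num valid entries */
        free(dl);
        return err;
}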
2929
2930 int hci_get_dev_info(void __user *arg)
2931 {
2932         struct hci_dev *hdev;
2933         struct hci_dev_info di;
2934         unsigned long flags;
2935         int err = 0;
2936
2937         if (copy_from_user(&di, arg, sizeof(di)))
2938                 return -EFAULT;
2939
2940         hdev = hci_dev_get(di.dev_id);
2941         if (!hdev)
2942                 return -ENODEV;
2943
2944         /* When auto-off is configured, the transport is running,
2945          * but in that case still indicate that the device is
2946          * actually down.
2947          */
2948         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2949                 flags = hdev->flags & ~BIT(HCI_UP);
2950         else
2951                 flags = hdev->flags;
2952
2953         strcpy(di.name, hdev->name);
2954         di.bdaddr   = hdev->bdaddr;
2955         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2956         di.flags    = flags;
2957         di.pkt_type = hdev->pkt_type;
2958         if (lmp_bredr_capable(hdev)) {
2959                 di.acl_mtu  = hdev->acl_mtu;
2960                 di.acl_pkts = hdev->acl_pkts;
2961                 di.sco_mtu  = hdev->sco_mtu;
2962                 di.sco_pkts = hdev->sco_pkts;
2963         } else {
2964                 di.acl_mtu  = hdev->le_mtu;
2965                 di.acl_pkts = hdev->le_pkts;
2966                 di.sco_mtu  = 0;
2967                 di.sco_pkts = 0;
2968         }
2969         di.link_policy = hdev->link_policy;
2970         di.link_mode   = hdev->link_mode;
2971
2972         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2973         memcpy(&di.features, &hdev->features, sizeof(di.features));
2974
2975         if (copy_to_user(arg, &di, sizeof(di)))
2976                 err = -EFAULT;
2977
2978         hci_dev_put(hdev);
2979
2980         return err;
2981 }
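
/* Userspace sketch (assumptions as before; ba2str() comes from the
 * BlueZ <bluetooth/bluetooth.h>):
 *
 *      struct hci_dev_info di = { .dev_id = 0 };
 *      char addr[18];
 *
 *      if (!ioctl(ctl, HCIGETDEVINFO, (void *) &di)) {
 *              ba2str(&di.bdaddr, addr);
 *              printf("%s %s\n", di.name, addr);
 *      }
 */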
2982
2983 /* ---- Interface to HCI drivers ---- */
2984
2985 static int hci_rfkill_set_block(void *data, bool blocked)
2986 {
2987         struct hci_dev *hdev = data;
2988
2989         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2990
2991         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2992                 return -EBUSY;
2993
2994         if (blocked) {
2995                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2996                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2997                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2998                         hci_dev_do_close(hdev);
2999         } else {
3000                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
3001         }
3002
3003         return 0;
3004 }
3005
3006 static const struct rfkill_ops hci_rfkill_ops = {
3007         .set_block = hci_rfkill_set_block,
3008 };
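
/* Sketch of how these ops get wired up (an assumption: this mirrors
 * the rfkill_alloc()/rfkill_register() calls made from
 * hci_register_dev() later in this file):
 *
 *      hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *                                  RFKILL_TYPE_BLUETOOTH,
 *                                  &hci_rfkill_ops, hdev);
 */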
3009
3010 static void hci_power_on(struct work_struct *work)
3011 {
3012         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3013         int err;
3014
3015         BT_DBG("%s", hdev->name);
3016
3017         err = hci_dev_do_open(hdev);
3018         if (err < 0) {
3019                 mgmt_set_powered_failed(hdev, err);
3020                 return;
3021         }
3022
3023         /* During the HCI setup phase, a few error conditions are
3024          * ignored and they need to be checked now. If they are still
3025          * valid, it is important to turn the device back off.
3026          */
3027         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3028             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3029             (hdev->dev_type == HCI_BREDR &&
3030              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3031              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3032                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3033                 hci_dev_do_close(hdev);
3034         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3035                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3036                                    HCI_AUTO_OFF_TIMEOUT);
3037         }
3038
3039         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3040                 /* For unconfigured devices, set the HCI_RAW flag
3041                  * so that userspace can easily identify them.
3042                  */
3043                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3044                         set_bit(HCI_RAW, &hdev->flags);
3045
3046                 /* For fully configured devices, this will send
3047                  * the Index Added event. For unconfigured devices,
3048                  * it will send the Unconfigured Index Added event.
3049                  *
3050                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3051                  * and no event will be sent.
3052                  */
3053                 mgmt_index_added(hdev);
3054         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3055                 /* Once the controller is configured, it is
3056                  * important to clear the HCI_RAW flag.
3057                  */
3058                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3059                         clear_bit(HCI_RAW, &hdev->flags);
3060
3061                 /* Powering on the controller with HCI_CONFIG set only
3062                  * happens with the transition from unconfigured to
3063                  * configured. This will send the Index Added event.
3064                  */
3065                 mgmt_index_added(hdev);
3066         }
3067 }
3068
3069 static void hci_power_off(struct work_struct *work)
3070 {
3071         struct hci_dev *hdev = container_of(work, struct hci_dev,
3072                                             power_off.work);
3073
3074         BT_DBG("%s", hdev->name);
3075
3076         hci_dev_do_close(hdev);
3077 }
3078
3079 static void hci_discov_off(struct work_struct *work)
3080 {
3081         struct hci_dev *hdev;
3082
3083         hdev = container_of(work, struct hci_dev, discov_off.work);
3084
3085         BT_DBG("%s", hdev->name);
3086
3087         mgmt_discoverable_timeout(hdev);
3088 }
3089
3090 void hci_uuids_clear(struct hci_dev *hdev)
3091 {
3092         struct bt_uuid *uuid, *tmp;
3093
3094         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3095                 list_del(&uuid->list);
3096                 kfree(uuid);
3097         }
3098 }
3099
3100 void hci_link_keys_clear(struct hci_dev *hdev)
3101 {
3102         struct list_head *p, *n;
3103
3104         list_for_each_safe(p, n, &hdev->link_keys) {
3105                 struct link_key *key;
3106
3107                 key = list_entry(p, struct link_key, list);
3108
3109                 list_del(p);
3110                 kfree(key);
3111         }
3112 }
3113
3114 void hci_smp_ltks_clear(struct hci_dev *hdev)
3115 {
3116         struct smp_ltk *k;
3117
3118         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3119                 list_del_rcu(&k->list);
3120                 kfree_rcu(k, rcu);
3121         }
3122 }
3123
3124 void hci_smp_irks_clear(struct hci_dev *hdev)
3125 {
3126         struct smp_irk *k;
3127
3128         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3129                 list_del_rcu(&k->list);
3130                 kfree_rcu(k, rcu);
3131         }
3132 }
3133
3134 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3135 {
3136         struct link_key *k;
3137
3138         list_for_each_entry(k, &hdev->link_keys, list)
3139                 if (bacmp(bdaddr, &k->bdaddr) == 0)
3140                         return k;
3141
3142         return NULL;
3143 }
3144
3145 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3146                                u8 key_type, u8 old_key_type)
3147 {
3148         /* Legacy key */
3149         if (key_type < 0x03)
3150                 return true;
3151
3152         /* Debug keys are insecure so don't store them persistently */
3153         if (key_type == HCI_LK_DEBUG_COMBINATION)
3154                 return false;
3155
3156         /* Changed combination key and there's no previous one */
3157         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3158                 return false;
3159
3160         /* Security mode 3 case */
3161         if (!conn)
3162                 return true;
3163
3164         /* Neither the local nor the remote side requested no-bonding */
3165         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3166                 return true;
3167
3168         /* Local side had dedicated bonding as requirement */
3169         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3170                 return true;
3171
3172         /* Remote side had dedicated bonding as requirement */
3173         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3174                 return true;
3175
3176         /* If none of the above criteria match, then don't store the key
3177          * persistently */
3178         return false;
3179 }
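
/* For reference, the auth_type/remote_auth values tested above are the
 * authentication requirements from the core specification: 0x00/0x01
 * mean No Bonding (without/with MITM protection), 0x02/0x03 Dedicated
 * Bonding and 0x04/0x05 General Bonding, which is why "> 0x01" reads
 * as "some form of bonding was requested".
 */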
3180
static u8 ltk_role(u8 type)
{
        if (type == SMP_LTK)
                return HCI_ROLE_MASTER;

        return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
                             u8 role)
{
        struct smp_ltk *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv || k->rand != rand)
                        continue;

                if (ltk_role(k->type) != role)
                        continue;

                rcu_read_unlock();
                return k;
        }
        rcu_read_unlock();

        return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type, u8 role)
{
        struct smp_ltk *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (addr_type == k->bdaddr_type &&
                    bacmp(bdaddr, &k->bdaddr) == 0 &&
                    ltk_role(k->type) == role) {
                        rcu_read_unlock();
                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}

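/* Example lookup from a hypothetical caller (a sketch, not a real call
 * site; the variable names are assumptions). The helpers above take the
 * RCU read lock themselves; callers are expected to hold hdev->lock so
 * the returned key is not freed behind their back:
 *
 *      hci_dev_lock(hdev);
 *      ltk = hci_find_ltk_by_addr(hdev, &conn->dst, conn->dst_type,
 *                                 conn->role);
 *      if (ltk)
 *              ... use ltk->val and ltk->enc_size ...
 *      hci_dev_unlock(hdev);
 */
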
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
        struct smp_irk *irk;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (!bacmp(&irk->rpa, rpa)) {
                        rcu_read_unlock();
                        return irk;
                }
        }

        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (smp_irk_matches(hdev, irk->val, rpa)) {
                        bacpy(&irk->rpa, rpa);
                        rcu_read_unlock();
                        return irk;
                }
        }
        rcu_read_unlock();

        return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_irk *irk;

        /* Identity Address must be public or static random */
        if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
                return NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (addr_type == irk->addr_type &&
                    bacmp(bdaddr, &irk->bdaddr) == 0) {
                        rcu_read_unlock();
                        return irk;
                }
        }
        rcu_read_unlock();

        return NULL;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
                                  bdaddr_t *bdaddr, u8 *val, u8 type,
                                  u8 pin_len, bool *persistent)
{
        struct link_key *key, *old_key;
        u8 old_key_type;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (persistent)
                *persistent = hci_persistent_key(hdev, conn, type,
                                                 old_key_type);

        return key;
}

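/* Example: how a link-key notification handler might store a new key
 * (an illustrative sketch under assumed names; the real call site lives
 * in the HCI event handling code):
 *
 *      bool persistent;
 *
 *      hci_dev_lock(hdev);
 *      hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *                       ev->key_type, pin_len, &persistent);
 *      hci_dev_unlock(hdev);
 *
 * The persistent flag tells the caller whether the key should also be
 * handed to user space for permanent storage.
 */
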
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 type, u8 authenticated,
                            u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
        struct smp_ltk *key, *old_key;
        u8 role = ltk_role(type);

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->rand = rand;
        key->enc_size = enc_size;
        key->type = type;

        return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
        struct smp_irk *irk;

        irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
        if (!irk) {
                irk = kzalloc(sizeof(*irk), GFP_KERNEL);
                if (!irk)
                        return NULL;

                bacpy(&irk->bdaddr, bdaddr);
                irk->addr_type = addr_type;

                list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
        }

        memcpy(irk->val, val, 16);
        bacpy(&irk->rpa, rpa);

        return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del(&key->list);
        kfree(key);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct smp_ltk *k;
        int removed = 0;

        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
                removed++;
        }

        return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
        struct smp_irk *k;

        list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            cmd_timer.work);

        if (hdev->sent_cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);

                BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
        } else {
                BT_ERR("%s command tx timeout", hdev->name);
        }

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list)
                if (bacmp(bdaddr, &data->bdaddr) == 0)
                        return data;

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del(&data->list);
        kfree(data);

        return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 *hash, u8 *rand)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);
        if (!data) {
                data = kmalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                list_add(&data->list, &hdev->remote_oob_data);
        }

        memcpy(data->hash192, hash, sizeof(data->hash192));
        memcpy(data->rand192, rand, sizeof(data->rand192));

        memset(data->hash256, 0, sizeof(data->hash256));
        memset(data->rand256, 0, sizeof(data->rand256));

        BT_DBG("%s for %pMR", hdev->name, bdaddr);

        return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                u8 *hash192, u8 *rand192,
                                u8 *hash256, u8 *rand256)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);
        if (!data) {
                data = kmalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                list_add(&data->list, &hdev->remote_oob_data);
        }

        memcpy(data->hash192, hash192, sizeof(data->hash192));
        memcpy(data->rand192, rand192, sizeof(data->rand192));

        memcpy(data->hash256, hash256, sizeof(data->hash256));
        memcpy(data->rand256, rand256, sizeof(data->rand256));

        BT_DBG("%s for %pMR", hdev->name, bdaddr);

        return 0;
}

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
                                           bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, bdaddr_list, list) {
                if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
                        return b;
        }

        return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, bdaddr_list) {
                struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

                list_del(p);
                kfree(b);
        }
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (!bacmp(bdaddr, BDADDR_ANY))
                return -EBADF;

        if (hci_bdaddr_list_lookup(list, bdaddr, type))
                return -EEXIST;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, bdaddr);
        entry->bdaddr_type = type;

        list_add(&entry->list, list);

        return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (!bacmp(bdaddr, BDADDR_ANY)) {
                hci_bdaddr_list_clear(list);
                return 0;
        }

        entry = hci_bdaddr_list_lookup(list, bdaddr, type);
        if (!entry)
                return -ENOENT;

        list_del(&entry->list);
        kfree(entry);

        return 0;
}

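/* Example round trip through the list helpers above (a sketch with
 * error handling shortened). Note that BDADDR_ANY is rejected on add
 * but acts as a wildcard on delete, clearing the whole list:
 *
 *      err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *      if (err == -EEXIST)
 *              ... entry was already present ...
 *
 *      if (hci_bdaddr_list_lookup(&hdev->whitelist, &bdaddr, BDADDR_BREDR))
 *              hci_bdaddr_list_del(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 */
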
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
                                               bdaddr_t *addr, u8 addr_type)
{
        struct hci_conn_params *params;

        /* The conn params list only contains identity addresses */
        if (!hci_is_identity_address(addr, addr_type))
                return NULL;

        list_for_each_entry(params, &hdev->le_conn_params, list) {
                if (bacmp(&params->addr, addr) == 0 &&
                    params->addr_type == addr_type) {
                        return params;
                }
        }

        return NULL;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
        struct hci_conn *conn;

        conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
        if (!conn)
                return false;

        if (conn->dst_type != type)
                return false;

        if (conn->state != BT_CONNECTED)
                return false;

        return true;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
                                                  bdaddr_t *addr, u8 addr_type)
{
        struct hci_conn_params *param;

        /* The list only contains identity addresses */
        if (!hci_is_identity_address(addr, addr_type))
                return NULL;

        list_for_each_entry(param, list, action) {
                if (bacmp(&param->addr, addr) == 0 &&
                    param->addr_type == addr_type)
                        return param;
        }

        return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
                                            bdaddr_t *addr, u8 addr_type)
{
        struct hci_conn_params *params;

        if (!hci_is_identity_address(addr, addr_type))
                return NULL;

        params = hci_conn_params_lookup(hdev, addr, addr_type);
        if (params)
                return params;

        params = kzalloc(sizeof(*params), GFP_KERNEL);
        if (!params) {
                BT_ERR("Out of memory");
                return NULL;
        }

        bacpy(&params->addr, addr);
        params->addr_type = addr_type;

        list_add(&params->list, &hdev->le_conn_params);
        INIT_LIST_HEAD(&params->action);

        params->conn_min_interval = hdev->le_conn_min_interval;
        params->conn_max_interval = hdev->le_conn_max_interval;
        params->conn_latency = hdev->le_conn_latency;
        params->supervision_timeout = hdev->le_supv_timeout;
        params->auto_connect = HCI_AUTO_CONN_DISABLED;

        BT_DBG("addr %pMR (type %u)", addr, addr_type);

        return params;
}

/* This function requires the caller holds hdev->lock */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
                        u8 auto_connect)
{
        struct hci_conn_params *params;

        params = hci_conn_params_add(hdev, addr, addr_type);
        if (!params)
                return -EIO;

        if (params->auto_connect == auto_connect)
                return 0;

        list_del_init(&params->action);

        switch (auto_connect) {
        case HCI_AUTO_CONN_DISABLED:
        case HCI_AUTO_CONN_LINK_LOSS:
                hci_update_background_scan(hdev);
                break;
        case HCI_AUTO_CONN_REPORT:
                list_add(&params->action, &hdev->pend_le_reports);
                hci_update_background_scan(hdev);
                break;
        case HCI_AUTO_CONN_DIRECT:
        case HCI_AUTO_CONN_ALWAYS:
                if (!is_connected(hdev, addr, addr_type)) {
                        list_add(&params->action, &hdev->pend_le_conns);
                        hci_update_background_scan(hdev);
                }
                break;
        }

        params->auto_connect = auto_connect;

        BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
               auto_connect);

        return 0;
}

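/* Example: marking a peer for automatic reconnection (illustrative; as
 * noted above, the caller must hold hdev->lock):
 *
 *      hci_dev_lock(hdev);
 *      err = hci_conn_params_set(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *                                HCI_AUTO_CONN_ALWAYS);
 *      hci_dev_unlock(hdev);
 *
 * With HCI_AUTO_CONN_ALWAYS the device lands on the pend_le_conns list
 * (unless it is already connected) and background scanning is
 * re-evaluated so the controller starts looking for it.
 */
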
static void hci_conn_params_free(struct hci_conn_params *params)
{
        if (params->conn) {
                hci_conn_drop(params->conn);
                hci_conn_put(params->conn);
        }

        list_del(&params->action);
        list_del(&params->list);
        kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
        struct hci_conn_params *params;

        params = hci_conn_params_lookup(hdev, addr, addr_type);
        if (!params)
                return;

        hci_conn_params_free(params);

        hci_update_background_scan(hdev);

        BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
        struct hci_conn_params *params, *tmp;

        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
                if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
                        continue;
                list_del(&params->list);
                kfree(params);
        }

        BT_DBG("All disabled LE connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
        struct hci_conn_params *params, *tmp;

        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
                hci_conn_params_free(params);

        hci_update_background_scan(hdev);

        BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
        if (status) {
                BT_ERR("Failed to start inquiry: status %d", status);

                hci_dev_lock(hdev);
                hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                hci_dev_unlock(hdev);
                return;
        }
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
        struct hci_request req;
        struct hci_cp_inquiry cp;
        int err;

        if (status) {
                BT_ERR("Failed to disable LE scanning: status %d", status);
                return;
        }

        switch (hdev->discovery.type) {
        case DISCOV_TYPE_LE:
                hci_dev_lock(hdev);
                hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                hci_dev_unlock(hdev);
                break;

        case DISCOV_TYPE_INTERLEAVED:
                hci_req_init(&req, hdev);

                memset(&cp, 0, sizeof(cp));
                memcpy(&cp.lap, lap, sizeof(cp.lap));
                cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
                hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

                hci_dev_lock(hdev);

                hci_inquiry_cache_flush(hdev);

                err = hci_req_run(&req, inquiry_complete);
                if (err) {
                        BT_ERR("Inquiry request failed: err %d", err);
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                }

                hci_dev_unlock(hdev);
                break;
        }
}

static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        struct hci_request req;
        int err;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_le_scan_disable(&req);

        err = hci_req_run(&req, le_scan_disable_work_complete);
        if (err)
                BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
            hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
                BT_DBG("Deferring random address update");
                set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled, use a resolvable private address. If
         * the current RPA has expired, or something other than the
         * current RPA is in use, generate a new one.
         */
        if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* If privacy is required but no resolvable private address is
         * available, use an unresolvable private address. This is useful
         * for active scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t urpa;

                get_random_bytes(&urpa, 6);
                urpa.b[5] &= 0x3f;      /* Clear two most significant bits */

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &urpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address, use the static address as the random address (but
         * skip the HCI command if the current random address is already
         * the static one).
         */
        if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

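/* Summary of the own-address selection above (informative):
 *
 *   HCI_PRIVACY set          -> resolvable private address (RPA),
 *                               regenerated when the RPA timeout fires
 *   require_privacy == true  -> fresh non-resolvable private address
 *   forced or implicit
 *   static address           -> static random address
 *   otherwise                -> public address
 */
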
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is an LE-only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 *bdaddr_type)
{
        if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
                bacpy(bdaddr, &hdev->static_addr);
                *bdaddr_type = ADDR_LE_DEV_RANDOM;
        } else {
                bacpy(bdaddr, &hdev->bdaddr);
                *bdaddr_type = ADDR_LE_DEV_PUBLIC;
        }
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->num_iac = 0x01;           /* One IAC support is mandatory */
        hdev->io_capability = 0x03;     /* No Input No Output */
        hdev->manufacturer = 0xffff;    /* Default to internal use */
        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_tx_power = HCI_TX_POWER_INVALID;

        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        hdev->le_adv_channel_map = 0x07;
        hdev->le_adv_min_interval = 0x0800;
        hdev->le_adv_max_interval = 0x0800;
        hdev->le_scan_interval = 0x0060;
        hdev->le_scan_window = 0x0030;
        hdev->le_conn_min_interval = 0x0028;
        hdev->le_conn_max_interval = 0x0038;
        hdev->le_conn_latency = 0x0000;
        hdev->le_supv_timeout = 0x002a;

        hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
        hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
        hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
        hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

        mutex_init(&hdev->lock);
        mutex_init(&hdev->req_lock);

        INIT_LIST_HEAD(&hdev->mgmt_pending);
        INIT_LIST_HEAD(&hdev->blacklist);
        INIT_LIST_HEAD(&hdev->whitelist);
        INIT_LIST_HEAD(&hdev->uuids);
        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);
        INIT_LIST_HEAD(&hdev->identity_resolving_keys);
        INIT_LIST_HEAD(&hdev->remote_oob_data);
        INIT_LIST_HEAD(&hdev->le_white_list);
        INIT_LIST_HEAD(&hdev->le_conn_params);
        INIT_LIST_HEAD(&hdev->pend_le_conns);
        INIT_LIST_HEAD(&hdev->pend_le_reports);
        INIT_LIST_HEAD(&hdev->conn_hash.list);

        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);
        INIT_WORK(&hdev->power_on, hci_power_on);

        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        init_waitqueue_head(&hdev->req_wait_q);

        INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

        hci_init_sysfs(hdev);
        discovery_init(hdev);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        int id, error;

        if (!hdev->open || !hdev->close || !hdev->send)
                return -EINVAL;

        /* Do not allow HCI_AMP devices to register at index 0,
         * so the index can be used as the AMP controller ID.
         */
        switch (hdev->dev_type) {
        case HCI_BREDR:
                id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
                break;
        case HCI_AMP:
                id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
                break;
        default:
                return -EINVAL;
        }

        if (id < 0)
                return id;

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
                                          WQ_MEM_RECLAIM, 1, hdev->name);
        if (!hdev->workqueue) {
                error = -ENOMEM;
                goto err;
        }

        hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
                                              WQ_MEM_RECLAIM, 1, hdev->name);
        if (!hdev->req_workqueue) {
                destroy_workqueue(hdev->workqueue);
                error = -ENOMEM;
                goto err;
        }

        if (!IS_ERR_OR_NULL(bt_debugfs))
                hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

        dev_set_name(&hdev->dev, "%s", hdev->name);

        error = device_add(&hdev->dev);
        if (error < 0)
                goto err_wqueue;

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
                                    hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
                set_bit(HCI_RFKILLED, &hdev->dev_flags);

        set_bit(HCI_SETUP, &hdev->dev_flags);
        set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

        if (hdev->dev_type == HCI_BREDR) {
                /* Assume BR/EDR support until proven otherwise (such as
                 * through reading supported features during init).
                 */
                set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
        }

        write_lock(&hci_dev_list_lock);
        list_add(&hdev->list, &hci_dev_list);
        write_unlock(&hci_dev_list_lock);

        /* Devices that are marked for raw-only usage are unconfigured
         * and should not be included in normal operation.
         */
        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);

        queue_work(hdev->req_workqueue, &hdev->power_on);

        return id;

err_wqueue:
        destroy_workqueue(hdev->workqueue);
        destroy_workqueue(hdev->req_workqueue);
err:
        ida_simple_remove(&hci_index_ida, hdev->id);

        return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
        int i, id;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        set_bit(HCI_UNREGISTER, &hdev->dev_flags);

        id = hdev->id;

        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        cancel_work_sync(&hdev->power_on);

        if (!test_bit(HCI_INIT, &hdev->flags) &&
            !test_bit(HCI_SETUP, &hdev->dev_flags) &&
            !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                hci_dev_lock(hdev);
                mgmt_index_removed(hdev);
                hci_dev_unlock(hdev);
        }

        /* mgmt_index_removed should take care of emptying the
         * pending list */
        BUG_ON(!list_empty(&hdev->mgmt_pending));

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        smp_unregister(hdev);

        device_del(&hdev->dev);

        debugfs_remove_recursive(hdev->debugfs);

        destroy_workqueue(hdev->workqueue);
        destroy_workqueue(hdev->req_workqueue);

        hci_dev_lock(hdev);
        hci_bdaddr_list_clear(&hdev->blacklist);
        hci_bdaddr_list_clear(&hdev->whitelist);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_smp_ltks_clear(hdev);
        hci_smp_irks_clear(hdev);
        hci_remote_oob_data_clear(hdev);
        hci_bdaddr_list_clear(&hdev->le_white_list);
        hci_conn_params_clear_all(hdev);
        hci_dev_unlock(hdev);

        hci_dev_put(hdev);

        ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
        const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
        struct sk_buff *skb;

        skb = bt_skb_alloc(3, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        memcpy(skb_put(skb, 3), hw_err, 3);

        /* Send Hardware Error to upper stack */
        return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
                      !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        skb_queue_tail(&hdev->rx_q, skb);
        queue_work(hdev->workqueue, &hdev->rx_work);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

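/* Example of a transport driver handing a complete packet to the core
 * (a sketch only; real drivers live under drivers/bluetooth, and
 * buf/len are assumed to describe one complete HCI packet):
 *
 *      skb = bt_skb_alloc(len, GFP_ATOMIC);
 *      if (!skb)
 *              return -ENOMEM;
 *
 *      memcpy(skb_put(skb, len), buf, len);
 *      bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *      return hci_recv_frame(hdev, skb);
 */
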
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
            index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, GFP_ATOMIC);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                hdev->reassembly[index] = skb;
        }

        while (count) {
                scb = (void *) skb->cb;
                len = min_t(uint, scb->expect, count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(hdev, skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
        int type;
        int rem = 0;

        while (count) {
                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

                if (!skb) {
                        struct { char type; } *pkt;

                        /* Start of the frame */
                        pkt = data;
                        type = pkt->type;

                        data++;
                        count--;
                } else
                        type = bt_cb(skb)->pkt_type;

                rem = hci_reassembly(hdev, type, data, count,
                                     STREAM_REASSEMBLY);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

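/* Example for UART-style drivers that see a raw byte stream rather than
 * pre-framed packets (a sketch; buf/count are assumptions). The packet
 * type indicator is expected as the first byte of every frame and the
 * reassembly state is kept in hdev->reassembly:
 *
 *      ret = hci_recv_stream_fragment(hdev, buf, count);
 *      if (ret < 0)
 *              BT_ERR("Frame reassembly failed (%d)", ret);
 */
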
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
        int err;

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        /* Time stamp */
        __net_timestamp(skb);

        /* Send copy to monitor */
        hci_send_to_monitor(hdev, skb);

        if (atomic_read(&hdev->promisc)) {
                /* Send copy to the sockets */
                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        err = hdev->send(hdev, skb);
        if (err < 0) {
                BT_ERR("%s sending frame failed (%d)", hdev->name, err);
                kfree_skb(skb);
        }
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        bt_cb(skb)->req.complete = complete;

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

bool hci_req_pending(struct hci_dev *hdev)
{
        return (hdev->req_status == HCI_REQ_PEND);
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
                                       u32 plen, const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        bt_cb(skb)->opcode = opcode;

        return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
                 const void *param)
{
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        /* Stand-alone HCI commands must be flagged as
         * single-command requests.
         */
        bt_cb(skb)->req.start = true;

        skb_queue_tail(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->req.start = true;

        bt_cb(skb)->req.event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

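/* Typical request construction (a sketch mirroring users elsewhere in
 * this file such as le_scan_disable_work(); the completion callback
 * name is a placeholder):
 *
 *      struct hci_request req;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *      err = hci_req_run(&req, scan_enable_complete);
 *
 * The callback is attached to the last queued command, so it runs once
 * the whole request has completed.
 */
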
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
                          struct sk_buff *skb, __u16 flags)
{
        struct hci_conn *conn = chan->conn;
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        skb->len = skb_headlen(skb);
        skb->data_len = 0;

        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

        switch (hdev->dev_type) {
        case HCI_BREDR:
                hci_add_acl_hdr(skb, conn->handle, flags);
                break;
        case HCI_AMP:
                hci_add_acl_hdr(skb, chan->handle, flags);
                break;
        default:
                BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
                return;
        }

        list = skb_shinfo(skb)->frag_list;
        if (!list) {
                /* Non-fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(queue, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically. We need to use
                 * spin_lock_bh here because of 6LoWPAN links: on those
                 * links this function can be called from softirq context,
                 * and taking the plain spin lock could deadlock.
                 */
                spin_lock_bh(&queue->lock);

                __skb_queue_tail(queue, skb);

                flags &= ~ACL_START;
                flags |= ACL_CONT;
                do {
                        skb = list; list = list->next;

                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(queue, skb);
                } while (list);

                spin_unlock_bh(&queue->lock);
        }
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = chan->conn->hdev;

        BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

        hci_queue_acl(chan, &chan->data_q, skb, flags);

        queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
                                     int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL, *c;
        unsigned int num = 0, min = ~0;

        /* We don't have to lock the device here. Connections are always
         * added and removed with the TX task disabled.
         */

        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();

        if (conn) {
                int cnt, q;

                switch (conn->type) {
                case ACL_LINK:
                        cnt = hdev->acl_cnt;
                        break;
                case SCO_LINK:
                case ESCO_LINK:
                        cnt = hdev->sco_cnt;
                        break;
                case LE_LINK:
                        cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                        break;
                default:
                        cnt = 0;
                        BT_ERR("Unknown link type");
                }

                q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}

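/* Worked example of the quota calculation above: with hdev->acl_cnt == 8
 * free controller buffers and num == 3 ACL connections with queued data,
 * the selected (least-recently-served) connection gets a quote of
 * 8 / 3 = 2 packets for this scheduling round; when the division yields
 * zero, the quote is clamped to 1 so no connection is ever starved.
 */
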
4807 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4808 {
4809         struct hci_conn_hash *h = &hdev->conn_hash;
4810         struct hci_conn *c;
4811
4812         BT_ERR("%s link tx timeout", hdev->name);
4813
4814         rcu_read_lock();
4815
4816         /* Kill stalled connections */
4817         list_for_each_entry_rcu(c, &h->list, list) {
4818                 if (c->type == type && c->sent) {
4819                         BT_ERR("%s killing stalled connection %pMR",
4820                                hdev->name, &c->dst);
4821                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4822                 }
4823         }
4824
4825         rcu_read_unlock();
4826 }
4827
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
                                      int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_chan *chan = NULL;
        unsigned int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
        int cnt, q, conn_num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *tmp;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                conn_num++;

                list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (skb_queue_empty(&tmp->data_q))
                                continue;

                        skb = skb_peek(&tmp->data_q);
                        if (skb->priority < cur_prio)
                                continue;

                        if (skb->priority > cur_prio) {
                                num = 0;
                                min = ~0;
                                cur_prio = skb->priority;
                        }

                        num++;

                        if (conn->sent < min) {
                                min  = conn->sent;
                                chan = tmp;
                        }
                }

                if (hci_conn_num(hdev, type) == conn_num)
                        break;
        }

        rcu_read_unlock();

        if (!chan)
                return NULL;

        switch (chan->conn->type) {
        case ACL_LINK:
                cnt = hdev->acl_cnt;
                break;
        case AMP_LINK:
                cnt = hdev->block_cnt;
                break;
        case SCO_LINK:
        case ESCO_LINK:
                cnt = hdev->sco_cnt;
                break;
        case LE_LINK:
                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                break;
        default:
                cnt = 0;
                BT_ERR("Unknown link type");
        }

        q = cnt / num;
        *quote = q ? q : 1;
        BT_DBG("chan %p quote %d", chan, *quote);
        return chan;
}

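/* Priority aging: for every channel of the given link type that did
 * not get to send anything in the last scheduling round, promote the
 * head of its queue to HCI_PRIO_MAX - 1 so lower-priority traffic is
 * not starved by a busy high-priority channel. Channels that did send
 * simply have their round counter reset.
 */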
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn;
        int num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *chan;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                num++;

                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (chan->sent) {
                                chan->sent = 0;
                                continue;
                        }

                        if (skb_queue_empty(&chan->data_q))
                                continue;

                        skb = skb_peek(&chan->data_q);
                        if (skb->priority >= HCI_PRIO_MAX - 1)
                                continue;

                        skb->priority = HCI_PRIO_MAX - 1;

                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
                               skb->priority);
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();
}

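/* Block-based flow control: the number of controller buffer blocks an
 * ACL packet occupies is its payload length (without the ACL header)
 * rounded up to a whole number of block_len sized blocks.
 */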
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* Calculate count of blocks used by this packet */
        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

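/* Declare an ACL TX timeout if the controller has returned no buffer
 * credits since hdev->acl_last_tx for longer than HCI_ACL_TX_TIMEOUT.
 * Skipped while the controller is still unconfigured.
 */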
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
        if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds)
                 */
                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
                                       HCI_ACL_TX_TIMEOUT))
                        hci_link_tx_to(hdev, ACL_LINK);
        }
}

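/* Packet-based ACL scheduling: repeatedly pick the best channel via
 * hci_chan_sent() and drain it up to its quota, stopping early if a
 * lower-priority skb reaches the head of the queue. Afterwards, if
 * anything was sent, re-run priority aging.
 */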
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->acl_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;

        __check_timeout(hdev, cnt);

        while (hdev->acl_cnt &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
}

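/* Block-based ACL scheduling: same structure as hci_sched_acl_pkt(),
 * but quotas and credits are accounted in buffer blocks rather than
 * packets, and AMP controllers schedule AMP_LINK instead of ACL_LINK.
 */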
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        __check_timeout(hdev, cnt);

        BT_DBG("%s", hdev->name);

        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt) {
                                /* Not enough free blocks: put the skb
                                 * back instead of leaking it.
                                 */
                                skb_queue_head(&chan->data_q, skb);
                                return;
                        }

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}

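/* Dispatch ACL scheduling according to the flow control mode the
 * controller operates in: per-packet or per-block accounting.
 */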
static void hci_sched_acl(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        /* Nothing to do if a BR/EDR controller has no ACL links */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
                return;

        /* Nothing to do if an AMP controller has no AMP links */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);
                break;

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
                break;
        }
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, SCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

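/* eSCO counterpart of hci_sched_sco(); eSCO links draw on the same
 * buffer credit pool (sco_cnt) as plain SCO links.
 */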
static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

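/* LE scheduling: works like packet-based ACL scheduling, but draws on
 * the dedicated LE buffer pool (le_cnt) when the controller advertises
 * one, and falls back to the shared ACL pool otherwise.
 */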
static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds)
                 */
                if (!hdev->le_cnt && hdev->le_pkts &&
                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(hdev, skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}

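/* TX work: entry point for the transmit side. Runs each link type
 * scheduler unless the device is in user channel mode, then flushes
 * the raw packet queue.
 */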
static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                /* Schedule queues and push frames to the HCI driver */
                hci_sched_acl(hdev);
                hci_sched_sco(hdev);
                hci_sched_esco(hdev);
                hci_sched_le(hdev);
        }

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        }

        BT_ERR("%s ACL packet for unknown connection handle %d",
               hdev->name, handle);

        kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);
                return;
        }

        BT_ERR("%s SCO packet for unknown connection handle %d",
               hdev->name, handle);

        kfree_skb(skb);
}

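/* A request is complete when the head of the command queue starts a
 * new request (or the queue is empty); otherwise further commands of
 * the current request are still pending.
 */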
static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return bt_cb(skb)->req.start;
}

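/* Re-queue a clone of the last sent command at the head of the command
 * queue, unless it was HCI_OP_RESET. Used when a controller event
 * implies that the pending command was dropped (see the CSR workaround
 * in hci_req_cmd_complete below).
 */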
static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

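/* Handle completion of the command with the given opcode in the
 * context of request tracking: invoke the request's complete callback
 * once its last command has finished, and on failure flush the rest
 * of the request from the command queue.
 */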
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent, we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request, the complete
         * callback would be found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;

                if (req_complete) {
                        /* We must set the complete callback to NULL to
                         * avoid calling the callback more than once if
                         * this function gets called again.
                         */
                        bt_cb(hdev->sent_cmd)->req.complete = NULL;

                        goto call_complete;
                }
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}

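/* RX work: drain the receive queue. Every packet is mirrored to the
 * monitor socket (and to promiscuous sockets), then dispatched to the
 * event, ACL or SCO handler according to its packet type.
 */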
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in these states. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

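/* Command work: if the controller has a free command credit, send the
 * next queued command, keep a clone in hdev->sent_cmd for request
 * tracking, and arm the command timeout (unless a reset is in flight).
 */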
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                cancel_delayed_work(&hdev->cmd_timer);
                        else
                                schedule_delayed_work(&hdev->cmd_timer,
                                                      HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}

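/* Append an LE Set Scan Enable command that disables scanning to the
 * given request.
 */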
void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

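/* Synchronize the controller white list with the pending connection
 * and report lists in three passes: remove stale entries, then add
 * pending connections, then add pending reports. Returns the scan
 * filter policy to use: 0x01 if the white list can be used, 0x00 if
 * it cannot (too many devices, or a device using an RPA).
 */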
static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                struct hci_cp_le_del_from_white_list cp;

                if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                              &b->bdaddr, b->bdaddr_type) ||
                    hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                              &b->bdaddr, b->bdaddr_type)) {
                        white_list_entries++;
                        continue;
                }

                cp.bdaddr_type = b->bdaddr_type;
                bacpy(&cp.bdaddr, &b->bdaddr);

                hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                            sizeof(cp), &cp);
        }

        /* Now that all stale white list entries have been removed,
         * walk through the list of pending connections and ensure
         * that any new device gets programmed into the controller.
         *
         * If there are more devices than available white list
         * entries in the controller, then just abort and return a
         * filter policy value that does not use the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

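/* Build the command sequence for passive background scanning: pick the
 * own address type, update the white list (the controller does not
 * allow white list changes while scanning), then set the scan
 * parameters and enable scanning with duplicate filtering.
 */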
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using an unresolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
        if (status)
                BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
                       status);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller hold hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
        struct hci_request req;
        struct hci_conn *conn;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            test_bit(HCI_SETUP, &hdev->dev_flags) ||
            test_bit(HCI_CONFIG, &hdev->dev_flags) ||
            test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
            test_bit(HCI_UNREGISTER, &hdev->dev_flags))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        hci_req_init(&req, hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        return;

                hci_req_add_le_scan_disable(&req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
                if (conn)
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        hci_req_add_le_scan_disable(&req);

                hci_req_add_le_passive_scan(&req);

                BT_DBG("%s starting background scanning", hdev->name);
        }

        err = hci_req_run(&req, update_background_scan_complete);
        if (err)
                BT_ERR("Failed to run HCI request: err %d", err);
}

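/* Returns true if any device on the BR/EDR whitelist has no ACL
 * connection in BT_CONNECTED or BT_CONFIG state, in which case page
 * scanning needs to stay enabled so that such devices can connect.
 */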
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

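/* Recompute the BR/EDR scan mode (page scan and, when discoverable,
 * inquiry scan) and issue a Write Scan Enable command if the page scan
 * setting changed, either as part of the given request or as a
 * standalone command.
 */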
void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
{
        u8 scan;

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
                return;

        if (!hdev_is_powered(hdev))
                return;

        if (mgmt_powering_down(hdev))
                return;

        if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
            disconnected_whitelist_entries(hdev))
                scan = SCAN_PAGE;
        else
                scan = SCAN_DISABLED;

        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
                return;

        if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
                scan |= SCAN_INQUIRY;

        if (req)
                hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        else
                hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);