/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

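/* Request helpers used with hci_req_sync(). Each one queues a single
 * HCI command; the opt argument carries the raw value supplied by the
 * corresponding ioctl.
 */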
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

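/* Update the discovery state and let userspace know about it through
 * mgmt_discovering(). Entering DISCOVERY_STOPPED also re-evaluates
 * passive scanning.
 */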
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_passive_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

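/* Drop all entries from the inquiry cache. Callers are expected to
 * hold hdev->lock.
 */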
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

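/* Re-insert the entry into the resolve list so that the list stays
 * ordered by signal strength (smallest absolute RSSI first), without
 * moving entries whose name request is already pending.
 */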
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

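/* Copy up to num entries from the inquiry cache into buf as
 * struct inquiry_info records and return how many were copied.
 */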
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

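/* HCIINQUIRY ioctl handler: run an inquiry if the cache is stale and
 * copy the cached results back to user space.
 */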
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* for unlimited number of responses we will use buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        ret = hci_dev_open_sync(hdev);

        hci_req_sync_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
        int err;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        err = hci_dev_close_sync(hdev);

        hci_req_sync_unlock(hdev);

        return err;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        cancel_work_sync(&hdev->power_on);
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
        int ret;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        /* Cancel these to avoid queueing non-chained pending work */
        hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
        cancel_delayed_work(&hdev->cmd_timer);
        cancel_delayed_work(&hdev->ncmd_timer);

        /* Avoid potential lockdep warnings from the *_flush() calls by
         * ensuring the workqueue is empty up front.
         */
        drain_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        ret = hci_reset_sync(hdev);

        hci_req_sync_unlock(hdev);
        return ret;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        err = hci_dev_do_reset(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                ret = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                ret = -EOPNOTSUPP;
                goto done;
        }

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
        hci_dev_put(hdev);
        return ret;
}

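/* Bring the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in line with a
 * scan mode that was set outside of mgmt (e.g. via the HCISETSCAN
 * ioctl) and notify userspace about the new settings.
 */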
static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
        bool conn_changed, discov_changed;

        BT_DBG("%s scan 0x%02x", hdev->name, scan);

        if ((scan & SCAN_PAGE))
                conn_changed = !hci_dev_test_and_set_flag(hdev,
                                                          HCI_CONNECTABLE);
        else
                conn_changed = hci_dev_test_and_clear_flag(hdev,
                                                           HCI_CONNECTABLE);

        if ((scan & SCAN_INQUIRY)) {
                discov_changed = !hci_dev_test_and_set_flag(hdev,
                                                            HCI_DISCOVERABLE);
        } else {
                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
                discov_changed = hci_dev_test_and_clear_flag(hdev,
                                                             HCI_DISCOVERABLE);
        }

        if (!hci_dev_test_flag(hdev, HCI_MGMT))
                return;

        if (conn_changed || discov_changed) {
                /* In case this was disabled through mgmt */
                hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

                if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                        hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

                mgmt_new_settings(hdev);
        }
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT, NULL);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);

                /* Ensure that the connectable and discoverable states
                 * get correctly modified as this was a non-mgmt change.
                 */
                if (!err)
                        hci_update_passive_scan_state(hdev, dr.dev_opt);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                if (hdev->pkt_type == (__u16) dr.dev_opt)
                        break;

                hdev->pkt_type = (__u16) dr.dev_opt;
                mgmt_phy_configuration_changed(hdev, NULL);
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

done:
        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                unsigned long flags = hdev->flags;

                /* When the auto-off is configured it means the transport
                 * is running, but in that case still indicate that the
                 * device is actually down.
                 */
                if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                        flags &= ~BIT(HCI_UP);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        unsigned long flags;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        /* When the auto-off is configured it means the transport
         * is running, but in that case still indicate that the
         * device is actually down.
         */
        if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                flags = hdev->flags & ~BIT(HCI_UP);
        else
                flags = hdev->flags;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
        di.flags    = flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

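/* rfkill callback: blocking closes the device unless it is still in
 * setup or config state, and is refused entirely for devices in user
 * channel mode.
 */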
static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;

        if (blocked) {
                hci_dev_set_flag(hdev, HCI_RFKILLED);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG))
                        hci_dev_do_close(hdev);
        } else {
                hci_dev_clear_flag(hdev, HCI_RFKILLED);
        }

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_UP, &hdev->flags) &&
            hci_dev_test_flag(hdev, HCI_MGMT) &&
            hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
                cancel_delayed_work(&hdev->power_off);
                err = hci_powered_update_sync(hdev);
                mgmt_power_on(hdev, err);
                return;
        }

        err = hci_dev_do_open(hdev);
        if (err < 0) {
                hci_dev_lock(hdev);
                mgmt_set_powered_failed(hdev, err);
                hci_dev_unlock(hdev);
                return;
        }

        /* During the HCI setup phase, a few error conditions are
         * ignored and they need to be checked now. If they are still
         * valid, it is important to turn the device back off.
         */
        if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
            hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
            (hdev->dev_type == HCI_PRIMARY &&
             !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
             !bacmp(&hdev->static_addr, BDADDR_ANY))) {
                hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
                hci_dev_do_close(hdev);
        } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);
        }

        if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
                /* For unconfigured devices, set the HCI_RAW flag
                 * so that userspace can easily identify them.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        set_bit(HCI_RAW, &hdev->flags);

                /* For fully configured devices, this will send
                 * the Index Added event. For unconfigured devices,
                 * it will send the Unconfigured Index Added event.
                 *
                 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
                 * and no event will be sent.
                 */
                mgmt_index_added(hdev);
        } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
                /* Now that the controller is configured, it is
                 * important to clear the HCI_RAW flag.
                 */
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        clear_bit(HCI_RAW, &hdev->flags);

                /* Powering on the controller with HCI_CONFIG set only
                 * happens with the transition from unconfigured to
                 * configured. This will send the Index Added event.
                 */
                mgmt_index_added(hdev);
        }
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

        BT_DBG("%s", hdev->name);

        if (hdev->hw_error)
                hdev->hw_error(hdev, hdev->hw_error_code);
        else
                bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

        if (hci_dev_do_close(hdev))
                return;

        hci_dev_do_open(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
        struct link_key *key;

        list_for_each_entry(key, &hdev->link_keys, list) {
                list_del_rcu(&key->list);
                kfree_rcu(key, rcu);
        }
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
        struct smp_irk *k;

        list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
        struct blocked_key *b;

        list_for_each_entry(b, &hdev->blocked_keys, list) {
                list_del_rcu(&b->list);
                kfree_rcu(b, rcu);
        }
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
        bool blocked = false;
        struct blocked_key *b;

        rcu_read_lock();
        list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
                if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
                        blocked = true;
                        break;
                }
        }

        rcu_read_unlock();
        return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->link_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) == 0) {
                        rcu_read_unlock();

                        if (hci_is_blocked_key(hdev,
                                               HCI_BLOCKED_KEY_TYPE_LINKKEY,
                                               k->val)) {
                                bt_dev_warn_ratelimited(hdev,
                                                        "Link key blocked for %pMR",
                                                        &k->bdaddr);
                                return NULL;
                        }

                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}

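/* Decide whether a BR/EDR link key should be stored persistently,
 * based on the key type and on the bonding requirements that local
 * and remote side announced during pairing.
 */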
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* BR/EDR key derived using SC from an LE link */
        if (conn->type == LE_LINK)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}

static u8 ltk_role(u8 type)
{
        if (type == SMP_LTK)
                return HCI_ROLE_MASTER;

        return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                             u8 addr_type, u8 role)
{
        struct smp_ltk *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
                        continue;

                if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
                        rcu_read_unlock();

                        if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
                                               k->val)) {
                                bt_dev_warn_ratelimited(hdev,
                                                        "LTK blocked for %pMR",
                                                        &k->bdaddr);
                                return NULL;
                        }

                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
        struct smp_irk *irk_to_return = NULL;
        struct smp_irk *irk;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (!bacmp(&irk->rpa, rpa)) {
                        irk_to_return = irk;
                        goto done;
                }
        }

        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (smp_irk_matches(hdev, irk->val, rpa)) {
                        bacpy(&irk->rpa, rpa);
                        irk_to_return = irk;
                        goto done;
                }
        }

done:
        if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
                                                irk_to_return->val)) {
                bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
                                        &irk_to_return->bdaddr);
                irk_to_return = NULL;
        }

        rcu_read_unlock();

        return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_irk *irk_to_return = NULL;
        struct smp_irk *irk;

        /* Identity Address must be public or static random */
        if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
                return NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (addr_type == irk->addr_type &&
                    bacmp(bdaddr, &irk->bdaddr) == 0) {
                        irk_to_return = irk;
                        goto done;
                }
        }

done:
        if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
                                                irk_to_return->val)) {
                bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
                                        &irk_to_return->bdaddr);
                irk_to_return = NULL;
        }

        rcu_read_unlock();

        return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
                                  bdaddr_t *bdaddr, u8 *val, u8 type,
                                  u8 pin_len, bool *persistent)
{
        struct link_key *key, *old_key;
        u8 old_key_type;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (persistent)
                *persistent = hci_persistent_key(hdev, conn, type,
                                                 old_key_type);

        return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 type, u8 authenticated,
                            u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
        struct smp_ltk *key, *old_key;
        u8 role = ltk_role(type);

        old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->rand = rand;
        key->enc_size = enc_size;
        key->type = type;

        return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
        struct smp_irk *irk;

        irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
        if (!irk) {
                irk = kzalloc(sizeof(*irk), GFP_KERNEL);
                if (!irk)
                        return NULL;

                bacpy(&irk->bdaddr, bdaddr);
                irk->addr_type = addr_type;

                list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
        }

        memcpy(irk->val, val, 16);
        bacpy(&irk->rpa, rpa);

        return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del_rcu(&key->list);
        kfree_rcu(key, rcu);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct smp_ltk *k;
        int removed = 0;

        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
                removed++;
        }

        return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
        struct smp_irk *k;

        list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct smp_ltk *k;
        struct smp_irk *irk;
        u8 addr_type;

        if (type == BDADDR_BREDR) {
                if (hci_find_link_key(hdev, bdaddr))
                        return true;
                return false;
        }

        /* Convert to HCI addr type which struct smp_ltk uses */
        if (type == BDADDR_LE_PUBLIC)
                addr_type = ADDR_LE_DEV_PUBLIC;
        else
                addr_type = ADDR_LE_DEV_RANDOM;

        irk = hci_get_irk(hdev, bdaddr, addr_type);
        if (irk) {
                bdaddr = &irk->bdaddr;
                addr_type = irk->addr_type;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
                        rcu_read_unlock();
                        return true;
                }
        }
        rcu_read_unlock();

        return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            cmd_timer.work);

        if (hdev->sent_cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);

                bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
        } else {
                bt_dev_err(hdev, "command tx timeout");
        }

        if (hdev->cmd_timeout)
                hdev->cmd_timeout(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            ncmd_timer.work);

        bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

        /* During HCI_INIT phase no events can be injected if the ncmd timer
         * triggers since the procedure has its own timeout handling.
         */
        if (test_bit(HCI_INIT, &hdev->flags))
                return;

        /* This is an irrecoverable state, inject hardware error event */
        hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list) {
                if (bacmp(bdaddr, &data->bdaddr) != 0)
                        continue;
                if (data->bdaddr_type != bdaddr_type)
                        continue;
                return data;
        }

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 bdaddr_type)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

        list_del(&data->list);
        kfree(data);

        return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }
}

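/* Store remote OOB data for bdaddr. The present field tracks which
 * value pairs are valid: 0x01 for P-192 only, 0x02 for P-256 only and
 * 0x03 for both.
 */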
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 bdaddr_type, u8 *hash192, u8 *rand192,
                            u8 *hash256, u8 *rand256)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data) {
                data = kmalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                data->bdaddr_type = bdaddr_type;
                list_add(&data->list, &hdev->remote_oob_data);
        }

        if (hash192 && rand192) {
                memcpy(data->hash192, hash192, sizeof(data->hash192));
                memcpy(data->rand192, rand192, sizeof(data->rand192));
                if (hash256 && rand256)
                        data->present = 0x03;
        } else {
                memset(data->hash192, 0, sizeof(data->hash192));
                memset(data->rand192, 0, sizeof(data->rand192));
                if (hash256 && rand256)
                        data->present = 0x02;
                else
                        data->present = 0x00;
        }

        if (hash256 && rand256) {
                memcpy(data->hash256, hash256, sizeof(data->hash256));
                memcpy(data->rand256, rand256, sizeof(data->rand256));
        } else {
                memset(data->hash256, 0, sizeof(data->hash256));
                memset(data->rand256, 0, sizeof(data->rand256));
                if (hash192 && rand192)
                        data->present = 0x01;
        }

        BT_DBG("%s for %pMR", hdev->name, bdaddr);

        return 0;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
                if (adv_instance->instance == instance)
                        return adv_instance;
        }

        return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *cur_instance;

        cur_instance = hci_find_adv_instance(hdev, instance);
        if (!cur_instance)
                return NULL;

        if (cur_instance == list_last_entry(&hdev->adv_instances,
                                            struct adv_info, list))
                return list_first_entry(&hdev->adv_instances,
                                        struct adv_info, list);
        else
                return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        BT_DBG("%s removing %d", hdev->name, instance);

        if (hdev->cur_adv_instance == instance) {
                if (hdev->adv_instance_timeout) {
                        cancel_delayed_work(&hdev->adv_instance_expire);
                        hdev->adv_instance_timeout = 0;
                }
                hdev->cur_adv_instance = 0x00;
        }

        cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

        list_del(&adv_instance->list);
        kfree(adv_instance);

        hdev->adv_instance_cnt--;

        return 0;
}

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
        struct adv_info *adv_instance, *n;

        list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
                adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
        struct adv_info *adv_instance, *n;

        if (hdev->adv_instance_timeout) {
                cancel_delayed_work(&hdev->adv_instance_expire);
                hdev->adv_instance_timeout = 0;
        }

        list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
                cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
                list_del(&adv_instance->list);
                kfree(adv_instance);
        }

        hdev->adv_instance_cnt = 0;
        hdev->cur_adv_instance = 0x00;
}

1691 static void adv_instance_rpa_expired(struct work_struct *work)
1692 {
1693         struct adv_info *adv_instance = container_of(work, struct adv_info,
1694                                                      rpa_expired_cb.work);
1695
1696         BT_DBG("");
1697
1698         adv_instance->rpa_expired = true;
1699 }
1700
1701 /* This function requires the caller holds hdev->lock */
1702 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
1703                          u16 adv_data_len, u8 *adv_data,
1704                          u16 scan_rsp_len, u8 *scan_rsp_data,
1705                          u16 timeout, u16 duration, s8 tx_power,
1706                          u32 min_interval, u32 max_interval)
1707 {
1708         struct adv_info *adv_instance;
1709
1710         adv_instance = hci_find_adv_instance(hdev, instance);
1711         if (adv_instance) {
1712                 memset(adv_instance->adv_data, 0,
1713                        sizeof(adv_instance->adv_data));
1714                 memset(adv_instance->scan_rsp_data, 0,
1715                        sizeof(adv_instance->scan_rsp_data));
1716         } else {
1717                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1718                     instance < 1 || instance > hdev->le_num_of_adv_sets)
1719                         return -EOVERFLOW;
1720
1721                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
1722                 if (!adv_instance)
1723                         return -ENOMEM;
1724
1725                 adv_instance->pending = true;
1726                 adv_instance->instance = instance;
1727                 list_add(&adv_instance->list, &hdev->adv_instances);
1728                 hdev->adv_instance_cnt++;
1729         }
1730
1731         adv_instance->flags = flags;
1732         adv_instance->min_interval = min_interval;
1733         adv_instance->max_interval = max_interval;
1734         adv_instance->tx_power = tx_power;
1735
1736         hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1737                                   scan_rsp_len, scan_rsp_data);
1738
1739         adv_instance->timeout = timeout;
1740         adv_instance->remaining_time = timeout;
1741
1742         if (duration == 0)
1743                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
1744         else
1745                 adv_instance->duration = duration;
1746
1747         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
1748                           adv_instance_rpa_expired);
1749
1750         BT_DBG("%s for instance %d", hdev->name, instance);
1751
1752         return 0;
1753 }
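
/* Illustrative sketch, not part of the original file: how a caller that
 * already holds hdev->lock might register a minimal advertising
 * instance. The single-AD-structure payload (Flags = LE General
 * Discoverable | BR/EDR Not Supported) is a made-up example, and
 * example_add_adv_instance is a hypothetical helper.
 */
static int __maybe_unused example_add_adv_instance(struct hci_dev *hdev)
{
	u8 adv_data[] = { 0x02, 0x01, 0x06 };	/* len, type (Flags), value */

	return hci_add_adv_instance(hdev, 0x01, 0,
				    sizeof(adv_data), adv_data,
				    0, NULL,		/* no scan response */
				    0, 0,		/* no timeout/duration */
				    HCI_ADV_TX_POWER_NO_PREFERENCE,
				    hdev->le_adv_min_interval,
				    hdev->le_adv_max_interval);
}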
1754
1755 /* This function requires the caller holds hdev->lock */
1756 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1757                               u16 adv_data_len, u8 *adv_data,
1758                               u16 scan_rsp_len, u8 *scan_rsp_data)
1759 {
1760         struct adv_info *adv;
1761
1762         adv = hci_find_adv_instance(hdev, instance);
1763
1764         /* If advertisement doesn't exist, we can't modify its data */
1765         if (!adv)
1766                 return -ENOENT;
1767
1768         if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1769                 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1770                 memcpy(adv->adv_data, adv_data, adv_data_len);
1771                 adv->adv_data_len = adv_data_len;
1772                 adv->adv_data_changed = true;
1773         }
1774
1775         if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1776                 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1777                 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1778                 adv->scan_rsp_len = scan_rsp_len;
1779                 adv->scan_rsp_changed = true;
1780         }
1781
1782         /* Mark the scan response as changed if flags would affect it */
1783         if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1784             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1785                 adv->scan_rsp_changed = true;
1786
1787         return 0;
1788 }
1789
1790 /* This function requires the caller holds hdev->lock */
1791 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1792 {
1793         u32 flags;
1794         struct adv_info *adv;
1795
1796         if (instance == 0x00) {
1797                 /* Instance 0 always manages the "Tx Power" and "Flags"
1798                  * fields
1799                  */
1800                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1801
1802                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1803                  * corresponds to the "connectable" instance flag.
1804                  */
1805                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1806                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
1807
1808                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1809                         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1810                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1811                         flags |= MGMT_ADV_FLAG_DISCOV;
1812
1813                 return flags;
1814         }
1815
1816         adv = hci_find_adv_instance(hdev, instance);
1817
1818         /* Return 0 when we got an invalid instance identifier. */
1819         if (!adv)
1820                 return 0;
1821
1822         return adv->flags;
1823 }
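
/* Worked example (illustrative, not part of the original file): on a
 * controller with HCI_ADVERTISING_CONNECTABLE and HCI_DISCOVERABLE both
 * set, instance 0x00 reports MGMT_ADV_FLAG_TX_POWER |
 * MGMT_ADV_FLAG_MANAGED_FLAGS | MGMT_ADV_FLAG_CONNECTABLE |
 * MGMT_ADV_FLAG_DISCOV, whereas any other instance simply reports the
 * flags that were stored when it was added.
 */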
1824
1825 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1826 {
1827         struct adv_info *adv;
1828
1829         /* Instance 0x00 always sets the local name */
1830         if (instance == 0x00)
1831                 return true;
1832
1833         adv = hci_find_adv_instance(hdev, instance);
1834         if (!adv)
1835                 return false;
1836
1837         if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1838             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1839                 return true;
1840
1841         return adv->scan_rsp_len ? true : false;
1842 }
1843
1844 /* This function requires the caller holds hdev->lock */
1845 void hci_adv_monitors_clear(struct hci_dev *hdev)
1846 {
1847         struct adv_monitor *monitor;
1848         int handle;
1849
1850         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1851                 hci_free_adv_monitor(hdev, monitor);
1852
1853         idr_destroy(&hdev->adv_monitors_idr);
1854 }
1855
1856 /* Frees the monitor structure and does the associated bookkeeping.
1857  * This function requires the caller holds hdev->lock.
1858  */
1859 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1860 {
1861         struct adv_pattern *pattern;
1862         struct adv_pattern *tmp;
1863
1864         if (!monitor)
1865                 return;
1866
1867         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1868                 list_del(&pattern->list);
1869                 kfree(pattern);
1870         }
1871
1872         if (monitor->handle)
1873                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1874
1875         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1876                 hdev->adv_monitors_cnt--;
1877                 mgmt_adv_monitor_removed(hdev, monitor->handle);
1878         }
1879
1880         kfree(monitor);
1881 }
1882
1883 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1884  * also attempts to forward the request to the controller.
1885  * This function requires the caller holds hci_req_sync_lock.
1886  */
1887 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1888 {
1889         int min, max, handle;
1890         int status = 0;
1891
1892         if (!monitor)
1893                 return -EINVAL;
1894
1895         hci_dev_lock(hdev);
1896
1897         min = HCI_MIN_ADV_MONITOR_HANDLE;
1898         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1899         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1900                            GFP_KERNEL);
1901
1902         hci_dev_unlock(hdev);
1903
1904         if (handle < 0)
1905                 return handle;
1906
1907         monitor->handle = handle;
1908
1909         if (!hdev_is_powered(hdev))
1910                 return status;
1911
1912         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1913         case HCI_ADV_MONITOR_EXT_NONE:
1914                 bt_dev_dbg(hdev, "add monitor %d status %d",
1915                            monitor->handle, status);
1916                 /* Message was not forwarded to controller - not an error */
1917                 break;
1918
1919         case HCI_ADV_MONITOR_EXT_MSFT:
1920                 status = msft_add_monitor_pattern(hdev, monitor);
1921                 bt_dev_dbg(hdev, "add monitor %d msft status %d",
1922                            monitor->handle, status);
1923                 break;
1924         }
1925
1926         return status;
1927 }
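
/* Illustrative sketch, not part of the original file: building a
 * minimal monitor that matches a 16-bit Service UUID prefix and handing
 * it to hci_add_adv_monitor() while holding hci_req_sync_lock. The UUID
 * (0x180d, Heart Rate) is a made-up example and error unwinding is
 * trimmed.
 */
static int __maybe_unused example_add_monitor(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	struct adv_pattern *pattern;
	int err;

	monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
	if (!monitor)
		return -ENOMEM;
	INIT_LIST_HEAD(&monitor->patterns);

	pattern = kzalloc(sizeof(*pattern), GFP_KERNEL);
	if (!pattern) {
		kfree(monitor);
		return -ENOMEM;
	}
	pattern->ad_type = 0x03;	/* Complete list of 16-bit UUIDs */
	pattern->offset = 0;
	pattern->length = 2;
	pattern->value[0] = 0x0d;	/* 0x180d, little endian in AD */
	pattern->value[1] = 0x18;
	list_add(&pattern->list, &monitor->patterns);

	err = hci_add_adv_monitor(hdev, monitor);
	if (err)
		hci_free_adv_monitor(hdev, monitor);

	return err;
}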
1928
1929 /* Attempts to tell the controller to remove the monitor and then frees
1930  * it. If the controller has no corresponding handle, remove it anyway.
1931  * This function requires the caller holds hci_req_sync_lock.
1932  */
1933 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1934                                   struct adv_monitor *monitor)
1935 {
1936         int status = 0;
1937
1938         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1939         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1940                 bt_dev_dbg(hdev, "remove monitor %d status %d",
1941                            monitor->handle, status);
1942                 goto free_monitor;
1943
1944         case HCI_ADV_MONITOR_EXT_MSFT:
1945                 status = msft_remove_monitor(hdev, monitor);
1946                 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1947                            monitor->handle, status);
1948                 break;
1949         }
1950
1951         /* In case no matching handle is registered, just free the monitor */
1952         if (status == -ENOENT)
1953                 goto free_monitor;
1954
1955         return status;
1956
1957 free_monitor:
1958         if (status == -ENOENT)
1959                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1960                             monitor->handle);
1961         hci_free_adv_monitor(hdev, monitor);
1962
1963         return status;
1964 }
1965
1966 /* This function requires the caller holds hci_req_sync_lock */
1967 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1968 {
1969         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1970
1971         if (!monitor)
1972                 return -EINVAL;
1973
1974         return hci_remove_adv_monitor(hdev, monitor);
1975 }
1976
1977 /* This function requires the caller holds hci_req_sync_lock */
1978 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
1979 {
1980         struct adv_monitor *monitor;
1981         int idr_next_id = 0;
1982         int status = 0;
1983
1984         while (1) {
1985                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
1986                 if (!monitor)
1987                         break;
1988
1989                 status = hci_remove_adv_monitor(hdev, monitor);
1990                 if (status)
1991                         return status;
1992
1993                 idr_next_id++;
1994         }
1995
1996         return status;
1997 }
1998
1999 /* This function requires the caller holds hdev->lock */
2000 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2001 {
2002         return !idr_is_empty(&hdev->adv_monitors_idr);
2003 }
2004
2005 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2006 {
2007         if (msft_monitor_supported(hdev))
2008                 return HCI_ADV_MONITOR_EXT_MSFT;
2009
2010         return HCI_ADV_MONITOR_EXT_NONE;
2011 }
2012
2013 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2014                                          bdaddr_t *bdaddr, u8 type)
2015 {
2016         struct bdaddr_list *b;
2017
2018         list_for_each_entry(b, bdaddr_list, list) {
2019                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2020                         return b;
2021         }
2022
2023         return NULL;
2024 }
2025
2026 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2027                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2028                                 u8 type)
2029 {
2030         struct bdaddr_list_with_irk *b;
2031
2032         list_for_each_entry(b, bdaddr_list, list) {
2033                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2034                         return b;
2035         }
2036
2037         return NULL;
2038 }
2039
2040 struct bdaddr_list_with_flags *
2041 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2042                                   bdaddr_t *bdaddr, u8 type)
2043 {
2044         struct bdaddr_list_with_flags *b;
2045
2046         list_for_each_entry(b, bdaddr_list, list) {
2047                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2048                         return b;
2049         }
2050
2051         return NULL;
2052 }
2053
2054 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2055 {
2056         struct bdaddr_list *b, *n;
2057
2058         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2059                 list_del(&b->list);
2060                 kfree(b);
2061         }
2062 }
2063
2064 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2065 {
2066         struct bdaddr_list *entry;
2067
2068         if (!bacmp(bdaddr, BDADDR_ANY))
2069                 return -EBADF;
2070
2071         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2072                 return -EEXIST;
2073
2074         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2075         if (!entry)
2076                 return -ENOMEM;
2077
2078         bacpy(&entry->bdaddr, bdaddr);
2079         entry->bdaddr_type = type;
2080
2081         list_add(&entry->list, list);
2082
2083         return 0;
2084 }
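
/* Illustrative sketch, not part of the original file: a typical round
 * trip through the bdaddr list helpers. The address is made up and the
 * locking a real caller would hold is elided.
 */
static int __maybe_unused example_bdaddr_list(struct hci_dev *hdev)
{
	bdaddr_t addr = { .b = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 } };
	int err;

	err = hci_bdaddr_list_add(&hdev->accept_list, &addr, BDADDR_BREDR);
	if (err)	/* -EEXIST if present, -EBADF for BDADDR_ANY */
		return err;

	WARN_ON(!hci_bdaddr_list_lookup(&hdev->accept_list, &addr,
					BDADDR_BREDR));

	return hci_bdaddr_list_del(&hdev->accept_list, &addr, BDADDR_BREDR);
}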
2085
2086 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2087                                         u8 type, u8 *peer_irk, u8 *local_irk)
2088 {
2089         struct bdaddr_list_with_irk *entry;
2090
2091         if (!bacmp(bdaddr, BDADDR_ANY))
2092                 return -EBADF;
2093
2094         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2095                 return -EEXIST;
2096
2097         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2098         if (!entry)
2099                 return -ENOMEM;
2100
2101         bacpy(&entry->bdaddr, bdaddr);
2102         entry->bdaddr_type = type;
2103
2104         if (peer_irk)
2105                 memcpy(entry->peer_irk, peer_irk, 16);
2106
2107         if (local_irk)
2108                 memcpy(entry->local_irk, local_irk, 16);
2109
2110         list_add(&entry->list, list);
2111
2112         return 0;
2113 }
2114
2115 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2116                                    u8 type, u32 flags)
2117 {
2118         struct bdaddr_list_with_flags *entry;
2119
2120         if (!bacmp(bdaddr, BDADDR_ANY))
2121                 return -EBADF;
2122
2123         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2124                 return -EEXIST;
2125
2126         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2127         if (!entry)
2128                 return -ENOMEM;
2129
2130         bacpy(&entry->bdaddr, bdaddr);
2131         entry->bdaddr_type = type;
2132         entry->flags = flags;
2133
2134         list_add(&entry->list, list);
2135
2136         return 0;
2137 }
2138
2139 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2140 {
2141         struct bdaddr_list *entry;
2142
2143         if (!bacmp(bdaddr, BDADDR_ANY)) {
2144                 hci_bdaddr_list_clear(list);
2145                 return 0;
2146         }
2147
2148         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2149         if (!entry)
2150                 return -ENOENT;
2151
2152         list_del(&entry->list);
2153         kfree(entry);
2154
2155         return 0;
2156 }
2157
2158 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2159                                                         u8 type)
2160 {
2161         struct bdaddr_list_with_irk *entry;
2162
2163         if (!bacmp(bdaddr, BDADDR_ANY)) {
2164                 hci_bdaddr_list_clear(list);
2165                 return 0;
2166         }
2167
2168         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2169         if (!entry)
2170                 return -ENOENT;
2171
2172         list_del(&entry->list);
2173         kfree(entry);
2174
2175         return 0;
2176 }
2177
2178 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2179                                    u8 type)
2180 {
2181         struct bdaddr_list_with_flags *entry;
2182
2183         if (!bacmp(bdaddr, BDADDR_ANY)) {
2184                 hci_bdaddr_list_clear(list);
2185                 return 0;
2186         }
2187
2188         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2189         if (!entry)
2190                 return -ENOENT;
2191
2192         list_del(&entry->list);
2193         kfree(entry);
2194
2195         return 0;
2196 }
2197
2198 /* This function requires the caller holds hdev->lock */
2199 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2200                                                bdaddr_t *addr, u8 addr_type)
2201 {
2202         struct hci_conn_params *params;
2203
2204         list_for_each_entry(params, &hdev->le_conn_params, list) {
2205                 if (bacmp(&params->addr, addr) == 0 &&
2206                     params->addr_type == addr_type) {
2207                         return params;
2208                 }
2209         }
2210
2211         return NULL;
2212 }
2213
2214 /* This function requires the caller holds hdev->lock */
2215 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2216                                                   bdaddr_t *addr, u8 addr_type)
2217 {
2218         struct hci_conn_params *param;
2219
2220         list_for_each_entry(param, list, action) {
2221                 if (bacmp(&param->addr, addr) == 0 &&
2222                     param->addr_type == addr_type)
2223                         return param;
2224         }
2225
2226         return NULL;
2227 }
2228
2229 /* This function requires the caller holds hdev->lock */
2230 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2231                                             bdaddr_t *addr, u8 addr_type)
2232 {
2233         struct hci_conn_params *params;
2234
2235         params = hci_conn_params_lookup(hdev, addr, addr_type);
2236         if (params)
2237                 return params;
2238
2239         params = kzalloc(sizeof(*params), GFP_KERNEL);
2240         if (!params) {
2241                 bt_dev_err(hdev, "out of memory");
2242                 return NULL;
2243         }
2244
2245         bacpy(&params->addr, addr);
2246         params->addr_type = addr_type;
2247
2248         list_add(&params->list, &hdev->le_conn_params);
2249         INIT_LIST_HEAD(&params->action);
2250
2251         params->conn_min_interval = hdev->le_conn_min_interval;
2252         params->conn_max_interval = hdev->le_conn_max_interval;
2253         params->conn_latency = hdev->le_conn_latency;
2254         params->supervision_timeout = hdev->le_supv_timeout;
2255         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2256
2257         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2258
2259         return params;
2260 }
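
/* Illustrative sketch, not part of the original file: marking a peer
 * for autoconnect while holding hdev->lock. A public LE address type is
 * assumed; a real caller would typically follow up with
 * hci_update_passive_scan().
 */
static void __maybe_unused example_conn_params(struct hci_dev *hdev,
					       bdaddr_t *peer)
{
	struct hci_conn_params *params;

	hci_dev_lock(hdev);
	params = hci_conn_params_add(hdev, peer, ADDR_LE_DEV_PUBLIC);
	if (params)
		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
	hci_dev_unlock(hdev);
}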
2261
2262 static void hci_conn_params_free(struct hci_conn_params *params)
2263 {
2264         if (params->conn) {
2265                 hci_conn_drop(params->conn);
2266                 hci_conn_put(params->conn);
2267         }
2268
2269         list_del(&params->action);
2270         list_del(&params->list);
2271         kfree(params);
2272 }
2273
2274 /* This function requires the caller holds hdev->lock */
2275 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2276 {
2277         struct hci_conn_params *params;
2278
2279         params = hci_conn_params_lookup(hdev, addr, addr_type);
2280         if (!params)
2281                 return;
2282
2283         hci_conn_params_free(params);
2284
2285         hci_update_passive_scan(hdev);
2286
2287         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2288 }
2289
2290 /* This function requires the caller holds hdev->lock */
2291 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2292 {
2293         struct hci_conn_params *params, *tmp;
2294
2295         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2296                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2297                         continue;
2298
2299                 /* If trying to establish a one-time connection to a
2300                  * disabled device, keep the params but mark them one-shot.
2301                  */
2302                 if (params->explicit_connect) {
2303                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2304                         continue;
2305                 }
2306
2307                 list_del(&params->list);
2308                 kfree(params);
2309         }
2310
2311         BT_DBG("All LE disabled connection parameters were removed");
2312 }
2313
2314 /* This function requires the caller holds hdev->lock */
2315 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2316 {
2317         struct hci_conn_params *params, *tmp;
2318
2319         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2320                 hci_conn_params_free(params);
2321
2322         BT_DBG("All LE connection parameters were removed");
2323 }
2324
2325 /* Copy the Identity Address of the controller.
2326  *
2327  * If the controller has a public BD_ADDR, then by default use that one.
2328  * If this is an LE-only controller without a public address, default to
2329  * the static random address.
2330  *
2331  * For debugging purposes it is possible to force controllers with a
2332  * public address to use the static random address instead.
2333  *
2334  * In case BR/EDR has been disabled on a dual-mode controller and
2335  * userspace has configured a static address, then that address
2336  * becomes the identity address instead of the public BR/EDR address.
2337  */
2338 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2339                                u8 *bdaddr_type)
2340 {
2341         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2342             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2343             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2344              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2345                 bacpy(bdaddr, &hdev->static_addr);
2346                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2347         } else {
2348                 bacpy(bdaddr, &hdev->bdaddr);
2349                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2350         }
2351 }
2352
2353 static void hci_clear_wake_reason(struct hci_dev *hdev)
2354 {
2355         hci_dev_lock(hdev);
2356
2357         hdev->wake_reason = 0;
2358         bacpy(&hdev->wake_addr, BDADDR_ANY);
2359         hdev->wake_addr_type = 0;
2360
2361         hci_dev_unlock(hdev);
2362 }
2363
2364 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2365                                 void *data)
2366 {
2367         struct hci_dev *hdev =
2368                 container_of(nb, struct hci_dev, suspend_notifier);
2369         int ret = 0;
2370
2371         if (action == PM_SUSPEND_PREPARE)
2372                 ret = hci_suspend_dev(hdev);
2373         else if (action == PM_POST_SUSPEND)
2374                 ret = hci_resume_dev(hdev);
2375
2376         if (ret)
2377                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2378                            action, ret);
2379
2380         return NOTIFY_DONE;
2381 }
2382
2383 /* Alloc HCI device */
2384 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2385 {
2386         struct hci_dev *hdev;
2387         unsigned int alloc_size;
2388
2389         alloc_size = sizeof(*hdev);
2390         if (sizeof_priv) {
2391                 /* Fixme: May need ALIGN-ment? */
2392                 alloc_size += sizeof_priv;
2393         }
2394
2395         hdev = kzalloc(alloc_size, GFP_KERNEL);
2396         if (!hdev)
2397                 return NULL;
2398
2399         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2400         hdev->esco_type = (ESCO_HV1);
2401         hdev->link_mode = (HCI_LM_ACCEPT);
2402         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2403         hdev->io_capability = 0x03;     /* No Input No Output */
2404         hdev->manufacturer = 0xffff;    /* Default to internal use */
2405         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2406         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2407         hdev->adv_instance_cnt = 0;
2408         hdev->cur_adv_instance = 0x00;
2409         hdev->adv_instance_timeout = 0;
2410
2411         hdev->advmon_allowlist_duration = 300;
2412         hdev->advmon_no_filter_duration = 500;
2413         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
2414
2415         hdev->sniff_max_interval = 800;
2416         hdev->sniff_min_interval = 80;
2417
2418         hdev->le_adv_channel_map = 0x07;
2419         hdev->le_adv_min_interval = 0x0800;
2420         hdev->le_adv_max_interval = 0x0800;
2421         hdev->le_scan_interval = 0x0060;
2422         hdev->le_scan_window = 0x0030;
2423         hdev->le_scan_int_suspend = 0x0400;
2424         hdev->le_scan_window_suspend = 0x0012;
2425         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2426         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2427         hdev->le_scan_int_adv_monitor = 0x0060;
2428         hdev->le_scan_window_adv_monitor = 0x0030;
2429         hdev->le_scan_int_connect = 0x0060;
2430         hdev->le_scan_window_connect = 0x0060;
2431         hdev->le_conn_min_interval = 0x0018;
2432         hdev->le_conn_max_interval = 0x0028;
2433         hdev->le_conn_latency = 0x0000;
2434         hdev->le_supv_timeout = 0x002a;
2435         hdev->le_def_tx_len = 0x001b;
2436         hdev->le_def_tx_time = 0x0148;
2437         hdev->le_max_tx_len = 0x001b;
2438         hdev->le_max_tx_time = 0x0148;
2439         hdev->le_max_rx_len = 0x001b;
2440         hdev->le_max_rx_time = 0x0148;
2441         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2442         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2443         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2444         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2445         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2446         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2447         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2448         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2449         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2450
2451         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2452         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2453         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2454         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2455         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2456         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2457
2458         /* default 1.28 sec page scan */
2459         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2460         hdev->def_page_scan_int = 0x0800;
2461         hdev->def_page_scan_window = 0x0012;
2462
2463         mutex_init(&hdev->lock);
2464         mutex_init(&hdev->req_lock);
2465
2466         INIT_LIST_HEAD(&hdev->mgmt_pending);
2467         INIT_LIST_HEAD(&hdev->reject_list);
2468         INIT_LIST_HEAD(&hdev->accept_list);
2469         INIT_LIST_HEAD(&hdev->uuids);
2470         INIT_LIST_HEAD(&hdev->link_keys);
2471         INIT_LIST_HEAD(&hdev->long_term_keys);
2472         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2473         INIT_LIST_HEAD(&hdev->remote_oob_data);
2474         INIT_LIST_HEAD(&hdev->le_accept_list);
2475         INIT_LIST_HEAD(&hdev->le_resolv_list);
2476         INIT_LIST_HEAD(&hdev->le_conn_params);
2477         INIT_LIST_HEAD(&hdev->pend_le_conns);
2478         INIT_LIST_HEAD(&hdev->pend_le_reports);
2479         INIT_LIST_HEAD(&hdev->conn_hash.list);
2480         INIT_LIST_HEAD(&hdev->adv_instances);
2481         INIT_LIST_HEAD(&hdev->blocked_keys);
2482         INIT_LIST_HEAD(&hdev->monitored_devices);
2483
2484         INIT_LIST_HEAD(&hdev->local_codecs);
2485         INIT_WORK(&hdev->rx_work, hci_rx_work);
2486         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2487         INIT_WORK(&hdev->tx_work, hci_tx_work);
2488         INIT_WORK(&hdev->power_on, hci_power_on);
2489         INIT_WORK(&hdev->error_reset, hci_error_reset);
2490
2491         hci_cmd_sync_init(hdev);
2492
2493         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2494
2495         skb_queue_head_init(&hdev->rx_q);
2496         skb_queue_head_init(&hdev->cmd_q);
2497         skb_queue_head_init(&hdev->raw_q);
2498
2499         init_waitqueue_head(&hdev->req_wait_q);
2500
2501         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2502         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2503
2504         hci_request_setup(hdev);
2505
2506         hci_init_sysfs(hdev);
2507         discovery_init(hdev);
2508
2509         return hdev;
2510 }
2511 EXPORT_SYMBOL(hci_alloc_dev_priv);
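
/* Illustrative sketch, not part of the original file: allocating a
 * controller with driver-private data appended, as transport drivers
 * do. struct example_priv is hypothetical; hci_get_priv() returns the
 * area placed right after struct hci_dev.
 */
struct example_priv {
	int irq;
};

static struct hci_dev *__maybe_unused example_alloc(void)
{
	struct hci_dev *hdev;
	struct example_priv *priv;

	hdev = hci_alloc_dev_priv(sizeof(struct example_priv));
	if (!hdev)
		return NULL;

	priv = hci_get_priv(hdev);
	priv->irq = -1;

	return hdev;
}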
2512
2513 /* Free HCI device */
2514 void hci_free_dev(struct hci_dev *hdev)
2515 {
2516         /* will be freed via the device release callback */
2517         put_device(&hdev->dev);
2518 }
2519 EXPORT_SYMBOL(hci_free_dev);
2520
2521 /* Register HCI device */
2522 int hci_register_dev(struct hci_dev *hdev)
2523 {
2524         int id, error;
2525
2526         if (!hdev->open || !hdev->close || !hdev->send)
2527                 return -EINVAL;
2528
2529         /* Do not allow HCI_AMP devices to register at index 0,
2530          * so the index can be used as the AMP controller ID.
2531          */
2532         switch (hdev->dev_type) {
2533         case HCI_PRIMARY:
2534                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2535                 break;
2536         case HCI_AMP:
2537                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2538                 break;
2539         default:
2540                 return -EINVAL;
2541         }
2542
2543         if (id < 0)
2544                 return id;
2545
2546         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
2547         hdev->id = id;
2548
2549         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2550
2551         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2552         if (!hdev->workqueue) {
2553                 error = -ENOMEM;
2554                 goto err;
2555         }
2556
2557         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2558                                                       hdev->name);
2559         if (!hdev->req_workqueue) {
2560                 destroy_workqueue(hdev->workqueue);
2561                 error = -ENOMEM;
2562                 goto err;
2563         }
2564
2565         if (!IS_ERR_OR_NULL(bt_debugfs))
2566                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2567
2568         dev_set_name(&hdev->dev, "%s", hdev->name);
2569
2570         error = device_add(&hdev->dev);
2571         if (error < 0)
2572                 goto err_wqueue;
2573
2574         hci_leds_init(hdev);
2575
2576         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2577                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2578                                     hdev);
2579         if (hdev->rfkill) {
2580                 if (rfkill_register(hdev->rfkill) < 0) {
2581                         rfkill_destroy(hdev->rfkill);
2582                         hdev->rfkill = NULL;
2583                 }
2584         }
2585
2586         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2587                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2588
2589         hci_dev_set_flag(hdev, HCI_SETUP);
2590         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2591
2592         if (hdev->dev_type == HCI_PRIMARY) {
2593                 /* Assume BR/EDR support until proven otherwise (such as
2594                  * through reading the supported features during init).
2595                  */
2596                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2597         }
2598
2599         write_lock(&hci_dev_list_lock);
2600         list_add(&hdev->list, &hci_dev_list);
2601         write_unlock(&hci_dev_list_lock);
2602
2603         /* Devices that are marked for raw-only usage are unconfigured
2604          * and should not be included in normal operation.
2605          */
2606         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2607                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2608
2609         /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2610          * callback.
2611          */
2612         if (hdev->wakeup)
2613                 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2614
2615         hci_sock_dev_event(hdev, HCI_DEV_REG);
2616         hci_dev_hold(hdev);
2617
2618         error = hci_register_suspend_notifier(hdev);
2619         if (error)
2620                 goto err_wqueue;
2621
2622         queue_work(hdev->req_workqueue, &hdev->power_on);
2623
2624         idr_init(&hdev->adv_monitors_idr);
2625         msft_register(hdev);
2626
2627         return id;
2628
2629 err_wqueue:
2630         debugfs_remove_recursive(hdev->debugfs);
2631         destroy_workqueue(hdev->workqueue);
2632         destroy_workqueue(hdev->req_workqueue);
2633 err:
2634         ida_simple_remove(&hci_index_ida, hdev->id);
2635
2636         return error;
2637 }
2638 EXPORT_SYMBOL(hci_register_dev);
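
/* Illustrative sketch, not part of the original file: the minimal
 * driver-side registration pattern. example_open/close/send are
 * hypothetical transport callbacks; real drivers such as btusb wire
 * these to their hardware.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	kfree_skb(skb);		/* a real driver hands skb to hardware */
	return 0;
}

static int __maybe_unused example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_VIRTUAL;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);	/* returns the hciX index or <0 */
	if (err < 0)
		hci_free_dev(hdev);

	return err < 0 ? err : 0;
}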
2639
2640 /* Unregister HCI device */
2641 void hci_unregister_dev(struct hci_dev *hdev)
2642 {
2643         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2644
2645         hci_dev_set_flag(hdev, HCI_UNREGISTER);
2646
2647         write_lock(&hci_dev_list_lock);
2648         list_del(&hdev->list);
2649         write_unlock(&hci_dev_list_lock);
2650
2651         cancel_work_sync(&hdev->power_on);
2652
2653         hci_cmd_sync_clear(hdev);
2654
2655         hci_unregister_suspend_notifier(hdev);
2656
2657         msft_unregister(hdev);
2658
2659         hci_dev_do_close(hdev);
2660
2661         if (!test_bit(HCI_INIT, &hdev->flags) &&
2662             !hci_dev_test_flag(hdev, HCI_SETUP) &&
2663             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2664                 hci_dev_lock(hdev);
2665                 mgmt_index_removed(hdev);
2666                 hci_dev_unlock(hdev);
2667         }
2668
2669         /* mgmt_index_removed should take care of emptying the
2670          * pending list */
2671         BUG_ON(!list_empty(&hdev->mgmt_pending));
2672
2673         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2674
2675         if (hdev->rfkill) {
2676                 rfkill_unregister(hdev->rfkill);
2677                 rfkill_destroy(hdev->rfkill);
2678         }
2679
2680         device_del(&hdev->dev);
2681         /* Actual cleanup is deferred until hci_release_dev(). */
2682         hci_dev_put(hdev);
2683 }
2684 EXPORT_SYMBOL(hci_unregister_dev);
2685
2686 /* Release HCI device */
2687 void hci_release_dev(struct hci_dev *hdev)
2688 {
2689         debugfs_remove_recursive(hdev->debugfs);
2690         kfree_const(hdev->hw_info);
2691         kfree_const(hdev->fw_info);
2692
2693         destroy_workqueue(hdev->workqueue);
2694         destroy_workqueue(hdev->req_workqueue);
2695
2696         hci_dev_lock(hdev);
2697         hci_bdaddr_list_clear(&hdev->reject_list);
2698         hci_bdaddr_list_clear(&hdev->accept_list);
2699         hci_uuids_clear(hdev);
2700         hci_link_keys_clear(hdev);
2701         hci_smp_ltks_clear(hdev);
2702         hci_smp_irks_clear(hdev);
2703         hci_remote_oob_data_clear(hdev);
2704         hci_adv_instances_clear(hdev);
2705         hci_adv_monitors_clear(hdev);
2706         hci_bdaddr_list_clear(&hdev->le_accept_list);
2707         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2708         hci_conn_params_clear_all(hdev);
2709         hci_discovery_filter_clear(hdev);
2710         hci_blocked_keys_clear(hdev);
2711         hci_dev_unlock(hdev);
2712
2713         ida_simple_remove(&hci_index_ida, hdev->id);
2714         kfree_skb(hdev->sent_cmd);
2715         kfree(hdev);
2716 }
2717 EXPORT_SYMBOL(hci_release_dev);
2718
2719 int hci_register_suspend_notifier(struct hci_dev *hdev)
2720 {
2721         int ret = 0;
2722
2723         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2724                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2725                 ret = register_pm_notifier(&hdev->suspend_notifier);
2726         }
2727
2728         return ret;
2729 }
2730
2731 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2732 {
2733         int ret = 0;
2734
2735         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
2736                 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2737
2738         return ret;
2739 }
2740
2741 /* Suspend HCI device */
2742 int hci_suspend_dev(struct hci_dev *hdev)
2743 {
2744         int ret;
2745
2746         bt_dev_dbg(hdev, "");
2747
2748         /* Suspend should only act when the device is powered. */
2749         if (!hdev_is_powered(hdev) ||
2750             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2751                 return 0;
2752
2753         /* If powering down don't attempt to suspend */
2754         if (mgmt_powering_down(hdev))
2755                 return 0;
2756
2757         hci_req_sync_lock(hdev);
2758         ret = hci_suspend_sync(hdev);
2759         hci_req_sync_unlock(hdev);
2760
2761         hci_clear_wake_reason(hdev);
2762         mgmt_suspending(hdev, hdev->suspend_state);
2763
2764         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2765         return ret;
2766 }
2767 EXPORT_SYMBOL(hci_suspend_dev);
2768
2769 /* Resume HCI device */
2770 int hci_resume_dev(struct hci_dev *hdev)
2771 {
2772         int ret;
2773
2774         bt_dev_dbg(hdev, "");
2775
2776         /* Resume should only act when the device is powered. */
2777         if (!hdev_is_powered(hdev) ||
2778             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2779                 return 0;
2780
2781         /* If powering down don't attempt to resume */
2782         if (mgmt_powering_down(hdev))
2783                 return 0;
2784
2785         hci_req_sync_lock(hdev);
2786         ret = hci_resume_sync(hdev);
2787         hci_req_sync_unlock(hdev);
2788
2789         mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2790                       hdev->wake_addr_type);
2791
2792         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2793         return ret;
2794 }
2795 EXPORT_SYMBOL(hci_resume_dev);
2796
2797 /* Reset HCI device */
2798 int hci_reset_dev(struct hci_dev *hdev)
2799 {
2800         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2801         struct sk_buff *skb;
2802
2803         skb = bt_skb_alloc(3, GFP_ATOMIC);
2804         if (!skb)
2805                 return -ENOMEM;
2806
2807         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2808         skb_put_data(skb, hw_err, 3);
2809
2810         bt_dev_err(hdev, "Injecting HCI hardware error event");
2811
2812         /* Send Hardware Error to upper stack */
2813         return hci_recv_frame(hdev, skb);
2814 }
2815 EXPORT_SYMBOL(hci_reset_dev);
2816
2817 /* Receive frame from HCI drivers */
2818 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2819 {
2820         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2821                       !test_bit(HCI_INIT, &hdev->flags))) {
2822                 kfree_skb(skb);
2823                 return -ENXIO;
2824         }
2825
2826         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
2827             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
2828             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
2829             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
2830                 kfree_skb(skb);
2831                 return -EINVAL;
2832         }
2833
2834         /* Incoming skb */
2835         bt_cb(skb)->incoming = 1;
2836
2837         /* Time stamp */
2838         __net_timestamp(skb);
2839
2840         skb_queue_tail(&hdev->rx_q, skb);
2841         queue_work(hdev->workqueue, &hdev->rx_work);
2842
2843         return 0;
2844 }
2845 EXPORT_SYMBOL(hci_recv_frame);
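
/* Illustrative sketch, not part of the original file: how a transport
 * driver typically feeds a received HCI event into the core. 'data' and
 * 'len' stand for bytes already read from the hardware.
 */
static int __maybe_unused example_driver_rx(struct hci_dev *hdev,
					    const void *data, size_t len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, data, len);

	return hci_recv_frame(hdev, skb);	/* consumes skb */
}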
2846
2847 /* Receive diagnostic message from HCI drivers */
2848 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2849 {
2850         /* Mark as diagnostic packet */
2851         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2852
2853         /* Time stamp */
2854         __net_timestamp(skb);
2855
2856         skb_queue_tail(&hdev->rx_q, skb);
2857         queue_work(hdev->workqueue, &hdev->rx_work);
2858
2859         return 0;
2860 }
2861 EXPORT_SYMBOL(hci_recv_diag);
2862
2863 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2864 {
2865         va_list vargs;
2866
2867         va_start(vargs, fmt);
2868         kfree_const(hdev->hw_info);
2869         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2870         va_end(vargs);
2871 }
2872 EXPORT_SYMBOL(hci_set_hw_info);
2873
2874 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2875 {
2876         va_list vargs;
2877
2878         va_start(vargs, fmt);
2879         kfree_const(hdev->fw_info);
2880         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2881         va_end(vargs);
2882 }
2883 EXPORT_SYMBOL(hci_set_fw_info);
2884
2885 /* ---- Interface to upper protocols ---- */
2886
2887 int hci_register_cb(struct hci_cb *cb)
2888 {
2889         BT_DBG("%p name %s", cb, cb->name);
2890
2891         mutex_lock(&hci_cb_list_lock);
2892         list_add_tail(&cb->list, &hci_cb_list);
2893         mutex_unlock(&hci_cb_list_lock);
2894
2895         return 0;
2896 }
2897 EXPORT_SYMBOL(hci_register_cb);
2898
2899 int hci_unregister_cb(struct hci_cb *cb)
2900 {
2901         BT_DBG("%p name %s", cb, cb->name);
2902
2903         mutex_lock(&hci_cb_list_lock);
2904         list_del(&cb->list);
2905         mutex_unlock(&hci_cb_list_lock);
2906
2907         return 0;
2908 }
2909 EXPORT_SYMBOL(hci_unregister_cb);
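
/* Illustrative sketch, not part of the original file: a minimal
 * upper-protocol callback registration. The no-op connect_cfm shown
 * here is hypothetical; hci_register_cb(&example_cb) would be called at
 * module init and hci_unregister_cb(&example_cb) at exit.
 */
static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
}

static struct hci_cb __maybe_unused example_cb = {
	.name		= "example",
	.connect_cfm	= example_connect_cfm,
};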
2910
2911 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2912 {
2913         int err;
2914
2915         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
2916                skb->len);
2917
2918         /* Time stamp */
2919         __net_timestamp(skb);
2920
2921         /* Send copy to monitor */
2922         hci_send_to_monitor(hdev, skb);
2923
2924         if (atomic_read(&hdev->promisc)) {
2925                 /* Send copy to the sockets */
2926                 hci_send_to_sock(hdev, skb);
2927         }
2928
2929         /* Get rid of skb owner, prior to sending to the driver. */
2930         skb_orphan(skb);
2931
2932         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
2933                 kfree_skb(skb);
2934                 return -EINVAL;
2935         }
2936
2937         err = hdev->send(hdev, skb);
2938         if (err < 0) {
2939                 bt_dev_err(hdev, "sending frame failed (%d)", err);
2940                 kfree_skb(skb);
2941                 return err;
2942         }
2943
2944         return 0;
2945 }
2946
2947 /* Send HCI command */
2948 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2949                  const void *param)
2950 {
2951         struct sk_buff *skb;
2952
2953         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2954
2955         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2956         if (!skb) {
2957                 bt_dev_err(hdev, "no memory for command");
2958                 return -ENOMEM;
2959         }
2960
2961         /* Stand-alone HCI commands must be flagged as
2962          * single-command requests.
2963          */
2964         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
2965
2966         skb_queue_tail(&hdev->cmd_q, skb);
2967         queue_work(hdev->workqueue, &hdev->cmd_work);
2968
2969         return 0;
2970 }
2971
2972 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
2973                    const void *param)
2974 {
2975         struct sk_buff *skb;
2976
2977         if (hci_opcode_ogf(opcode) != 0x3f) {
2978                 /* A controller receiving a command shall respond with either
2979                  * a Command Status Event or a Command Complete Event.
2980                  * Therefore, all standard HCI commands must be sent via the
2981                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
2982                  * Some vendors do not comply with this rule for vendor-specific
2983                  * commands and do not return any event. We want to support
2984                  * unresponded commands for such cases only.
2985                  */
2986                 bt_dev_err(hdev, "unresponded command not supported");
2987                 return -EINVAL;
2988         }
2989
2990         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2991         if (!skb) {
2992                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
2993                            opcode);
2994                 return -ENOMEM;
2995         }
2996
2997         hci_send_frame(hdev, skb);
2998
2999         return 0;
3000 }
3001 EXPORT_SYMBOL(__hci_cmd_send);
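
/* Illustrative sketch, not part of the original file: firing an
 * unresponded vendor command. OGF 0x3f is mandatory here; the OCF and
 * parameter byte are made up for the example.
 */
static int __maybe_unused example_vendor_cmd(struct hci_dev *hdev)
{
	u8 param = 0x01;	/* hypothetical vendor parameter */

	return __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
			      sizeof(param), &param);
}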
3002
3003 /* Get data from the previously sent command */
3004 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3005 {
3006         struct hci_command_hdr *hdr;
3007
3008         if (!hdev->sent_cmd)
3009                 return NULL;
3010
3011         hdr = (void *) hdev->sent_cmd->data;
3012
3013         if (hdr->opcode != cpu_to_le16(opcode))
3014                 return NULL;
3015
3016         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3017
3018         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3019 }
3020
3021 /* Send ACL data */
3022 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3023 {
3024         struct hci_acl_hdr *hdr;
3025         int len = skb->len;
3026
3027         skb_push(skb, HCI_ACL_HDR_SIZE);
3028         skb_reset_transport_header(skb);
3029         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3030         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3031         hdr->dlen   = cpu_to_le16(len);
3032 }
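
/* Worked example (illustrative, not part of the original file):
 * hci_handle_pack() folds the 2-bit packet-boundary/broadcast flags
 * into the top nibble of the 16-bit handle field, so
 * hci_handle_pack(0x0042, ACL_START) yields 0x2042 in CPU order before
 * the cpu_to_le16() conversion.
 */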
3033
3034 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3035                           struct sk_buff *skb, __u16 flags)
3036 {
3037         struct hci_conn *conn = chan->conn;
3038         struct hci_dev *hdev = conn->hdev;
3039         struct sk_buff *list;
3040
3041         skb->len = skb_headlen(skb);
3042         skb->data_len = 0;
3043
3044         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3045
3046         switch (hdev->dev_type) {
3047         case HCI_PRIMARY:
3048                 hci_add_acl_hdr(skb, conn->handle, flags);
3049                 break;
3050         case HCI_AMP:
3051                 hci_add_acl_hdr(skb, chan->handle, flags);
3052                 break;
3053         default:
3054                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3055                 return;
3056         }
3057
3058         list = skb_shinfo(skb)->frag_list;
3059         if (!list) {
3060                 /* Non fragmented */
3061                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3062
3063                 skb_queue_tail(queue, skb);
3064         } else {
3065                 /* Fragmented */
3066                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3067
3068                 skb_shinfo(skb)->frag_list = NULL;
3069
3070                 /* Queue all fragments atomically. We need to use spin_lock_bh
3071                  * here because of 6LoWPAN links, as there this function is
3072                  * called from softirq and using normal spin lock could cause
3073                  * deadlocks.
3074                  */
3075                 spin_lock_bh(&queue->lock);
3076
3077                 __skb_queue_tail(queue, skb);
3078
3079                 flags &= ~ACL_START;
3080                 flags |= ACL_CONT;
3081                 do {
3082                         skb = list; list = list->next;
3083
3084                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3085                         hci_add_acl_hdr(skb, conn->handle, flags);
3086
3087                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3088
3089                         __skb_queue_tail(queue, skb);
3090                 } while (list);
3091
3092                 spin_unlock_bh(&queue->lock);
3093         }
3094 }
3095
3096 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3097 {
3098         struct hci_dev *hdev = chan->conn->hdev;
3099
3100         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3101
3102         hci_queue_acl(chan, &chan->data_q, skb, flags);
3103
3104         queue_work(hdev->workqueue, &hdev->tx_work);
3105 }
3106
3107 /* Send SCO data */
3108 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3109 {
3110         struct hci_dev *hdev = conn->hdev;
3111         struct hci_sco_hdr hdr;
3112
3113         BT_DBG("%s len %d", hdev->name, skb->len);
3114
3115         hdr.handle = cpu_to_le16(conn->handle);
3116         hdr.dlen   = skb->len;
3117
3118         skb_push(skb, HCI_SCO_HDR_SIZE);
3119         skb_reset_transport_header(skb);
3120         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3121
3122         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3123
3124         skb_queue_tail(&conn->data_q, skb);
3125         queue_work(hdev->workqueue, &hdev->tx_work);
3126 }
3127
3128 /* ---- HCI TX task (outgoing data) ---- */
3129
3130 /* HCI Connection scheduler */
3131 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3132                                      int *quote)
3133 {
3134         struct hci_conn_hash *h = &hdev->conn_hash;
3135         struct hci_conn *conn = NULL, *c;
3136         unsigned int num = 0, min = ~0;
3137
3138         /* We don't have to lock device here. Connections are always
3139          * added and removed with TX task disabled. */
3140
3141         rcu_read_lock();
3142
3143         list_for_each_entry_rcu(c, &h->list, list) {
3144                 if (c->type != type || skb_queue_empty(&c->data_q))
3145                         continue;
3146
3147                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3148                         continue;
3149
3150                 num++;
3151
3152                 if (c->sent < min) {
3153                         min  = c->sent;
3154                         conn = c;
3155                 }
3156
3157                 if (hci_conn_num(hdev, type) == num)
3158                         break;
3159         }
3160
3161         rcu_read_unlock();
3162
3163         if (conn) {
3164                 int cnt, q;
3165
3166                 switch (conn->type) {
3167                 case ACL_LINK:
3168                         cnt = hdev->acl_cnt;
3169                         break;
3170                 case SCO_LINK:
3171                 case ESCO_LINK:
3172                         cnt = hdev->sco_cnt;
3173                         break;
3174                 case LE_LINK:
3175                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3176                         break;
3177                 default:
3178                         cnt = 0;
3179                         bt_dev_err(hdev, "unknown link type %d", conn->type);
3180                 }
3181
3182                 q = cnt / num;
3183                 *quote = q ? q : 1;
3184         } else
3185                 *quote = 0;
3186
3187         BT_DBG("conn %p quote %d", conn, *quote);
3188         return conn;
3189 }
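
/* Worked example (illustrative, not part of the original file): with
 * hdev->acl_cnt == 9 free ACL slots and num == 4 ACL connections that
 * have queued data, the connection with the fewest in-flight packets
 * wins and gets a quote of 9 / 4 = 2; a quotient of 0 is rounded up to
 * 1 so the winner can always make progress.
 */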
3190
3191 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3192 {
3193         struct hci_conn_hash *h = &hdev->conn_hash;
3194         struct hci_conn *c;
3195
3196         bt_dev_err(hdev, "link tx timeout");
3197
3198         rcu_read_lock();
3199
3200         /* Kill stalled connections */
3201         list_for_each_entry_rcu(c, &h->list, list) {
3202                 if (c->type == type && c->sent) {
3203                         bt_dev_err(hdev, "killing stalled connection %pMR",
3204                                    &c->dst);
3205                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3206                 }
3207         }
3208
3209         rcu_read_unlock();
3210 }
3211
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
                                      int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_chan *chan = NULL;
        unsigned int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
        int cnt, q, conn_num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *tmp;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                conn_num++;

                list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (skb_queue_empty(&tmp->data_q))
                                continue;

                        skb = skb_peek(&tmp->data_q);
                        if (skb->priority < cur_prio)
                                continue;

                        if (skb->priority > cur_prio) {
                                num = 0;
                                min = ~0;
                                cur_prio = skb->priority;
                        }

                        num++;

                        if (conn->sent < min) {
                                min  = conn->sent;
                                chan = tmp;
                        }
                }

                if (hci_conn_num(hdev, type) == conn_num)
                        break;
        }

        rcu_read_unlock();

        if (!chan)
                return NULL;

        switch (chan->conn->type) {
        case ACL_LINK:
                cnt = hdev->acl_cnt;
                break;
        case AMP_LINK:
                cnt = hdev->block_cnt;
                break;
        case SCO_LINK:
        case ESCO_LINK:
                cnt = hdev->sco_cnt;
                break;
        case LE_LINK:
                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                break;
        default:
                cnt = 0;
                bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
        }

        q = cnt / num;
        *quote = q ? q : 1;
        BT_DBG("chan %p quote %d", chan, *quote);
        return chan;
}

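/* Channel selection above is strict-priority with fairness inside a
 * priority class: the scan tracks the highest priority seen at the
 * head of any data_q (cur_prio), resets its candidate set whenever a
 * higher class shows up, and within that class picks the channel
 * whose owning connection has the fewest packets in flight
 * (min conn->sent). The quota is then the per-class fair share.
 */
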
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn;
        int num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *chan;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                num++;

                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (chan->sent) {
                                chan->sent = 0;
                                continue;
                        }

                        if (skb_queue_empty(&chan->data_q))
                                continue;

                        skb = skb_peek(&chan->data_q);
                        if (skb->priority >= HCI_PRIO_MAX - 1)
                                continue;

                        skb->priority = HCI_PRIO_MAX - 1;

                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
                               skb->priority);
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();
}

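/* Starvation avoidance: after a TX round, channels that managed to
 * send anything get their per-round counter cleared, while channels
 * that sent nothing have the packet at the head of their queue
 * promoted to HCI_PRIO_MAX - 1 so they win the next priority scan.
 */
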
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* Calculate count of blocks used by this packet */
        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

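/* Worked example (the block_len value is made up): for a 1004 byte
 * skb, the 4 byte ACL header is not counted against the data blocks,
 * so with hdev->block_len == 339 the packet consumes
 * DIV_ROUND_UP(1000, 339) == 3 controller buffer blocks.
 */
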
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
        if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds)
                 */
                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
                                       HCI_ACL_TX_TIMEOUT))
                        hci_link_tx_to(hdev, ACL_LINK);
        }
}

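/* The 40.9 second figure is the largest link supervision timeout the
 * spec allows (0xffff slots * 0.625 ms = 40.959 s); HCI_ACL_TX_TIMEOUT
 * is chosen above that, so a link is only declared dead after the
 * baseband itself must have given up on it.
 */
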
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, SCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

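/* SCO and eSCO links draw from the same hdev->sco_cnt buffer pool, so
 * the two schedulers above differ only in the link type they ask
 * hci_low_sent() for.
 */
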
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->acl_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;

        __check_timeout(hdev, cnt);

        while (hdev->acl_cnt &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        chan->sent++;
                        chan->conn->sent++;

                        /* Send pending SCO packets right away */
                        hci_sched_sco(hdev);
                        hci_sched_esco(hdev);
                }
        }

        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
}

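/* Interleaving hci_sched_sco()/hci_sched_esco() after every ACL frame
 * keeps the isochronous audio traffic from queueing up behind a long
 * ACL burst; SCO has hard latency requirements while ACL does not.
 */
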
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        __check_timeout(hdev, cnt);

        BT_DBG("%s", hdev->name);

        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        /* Check the block budget before dequeueing so a
                         * packet that doesn't fit is not leaked but stays
                         * queued for the next round.
                         */
                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt)
                                return;

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}

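/* Block-based flow control (AMP controllers) accounts for buffer
 * blocks rather than whole packets: one ACL frame may consume several
 * blocks, so both the global budget (hdev->block_cnt) and the
 * per-round quota are decremented by __get_blocks(), not by one.
 */
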
static void hci_sched_acl(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        /* No ACL link over BR/EDR controller */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
                return;

        /* No AMP link over AMP controller */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);
                break;

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
                break;
        }
}

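/* Packet-based flow control is the mode used by ordinary BR/EDR
 * (primary) controllers; block-based is the AMP variant. The dispatch
 * above simply follows whatever mode the controller reported.
 */
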
static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

        __check_timeout(hdev, cnt);

        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(hdev, skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;

                        /* Send pending SCO packets right away */
                        hci_sched_sco(hdev);
                        hci_sched_esco(hdev);
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}

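/* Controllers that advertise no dedicated LE buffers (le_pkts == 0
 * from LE Read Buffer Size) share the BR/EDR ACL pool, which is why
 * the budget is taken from, and written back to, either le_cnt or
 * acl_cnt depending on what the controller reported.
 */
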
static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                /* Schedule queues and send stuff to HCI driver */
                hci_sched_sco(hdev);
                hci_sched_esco(hdev);
                hci_sched_acl(hdev);
                hci_sched_le(hdev);
        }

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(hdev, skb);
}

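/* The TX work runs the schedulers latency-first: (e)SCO before ACL
 * before LE. When the device is opened in HCI_USER_CHANNEL mode the
 * kernel schedulers are bypassed entirely and only the raw queue,
 * which userspace feeds directly, is drained.
 */
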
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        } else {
                bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
                           handle);
        }

        kfree_skb(skb);
}

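/* The 16 bit handle field packs a 12 bit connection handle and 4 bits
 * of flags (packet boundary and broadcast). Illustrative decode: a
 * raw value of 0x2042 splits into hci_handle() == 0x042 and
 * hci_flags() == 0x2, i.e. the start of a new L2CAP PDU.
 */
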
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                bt_cb(skb)->sco.pkt_status = flags & 0x03;
                sco_recv_scodata(conn, skb);
                return;
        } else {
                bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
                                       handle);
        }

        kfree_skb(skb);
}

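/* The low two flag bits carry the Packet_Status_Flag for erroneous
 * data reporting (0b00 correctly received through 0b11 partially
 * lost); it is stashed in the skb control block so the SCO socket
 * layer can surface it to userspace.
 */
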
static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

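/* Commands belonging to one request are queued back to back and only
 * the first carries HCI_REQ_START; the current request is therefore
 * complete once the command queue is empty or its head starts a new
 * request.
 */
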
static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
                          hci_req_complete_t *req_complete,
                          hci_req_complete_skb_t *req_complete_skb)
{
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If we reach this point this event matches the last command sent */
        hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

        /* If the command succeeded and there are still more commands in
         * this request the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request the complete
         * callback would be found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
                *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
                return;
        }

        if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
                *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
                return;
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
                        *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
                else
                        *req_complete = bt_cb(skb)->hci.req_complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

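/* Failure path sketch: when a command in the middle of a request fails
 * (status != 0), the remaining queued commands of that request are
 * drained above, up to the next HCI_REQ_START marker, and the
 * request's completion callback is taken from the drained entries, so
 * callers see exactly one completion per request.
 */
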
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        /* The kcov_remote functions are used to collect packet parsing
         * coverage from this background thread and to associate that
         * coverage with the syscall thread which originally injected
         * the packet. This helps with fuzzing the kernel.
         */
        for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
                kcov_remote_start_common(skb_get_kcov_handle(skb));

                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                /* If the device has been opened in HCI_USER_CHANNEL,
                 * userspace has exclusive access to the device.
                 * While the device is still in HCI_INIT we nevertheless
                 * need to process incoming packets so that the driver's
                 * setup() can complete.
                 */
                if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    !test_bit(HCI_INIT, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in these states. */
                        switch (hci_skb_pkt_type(skb)) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                        case HCI_ISODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (hci_skb_pkt_type(skb)) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

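/* RX loop shape: the unusual for(;;) header brackets every iteration
 * with kcov_remote_start_common()/kcov_remote_stop(), guaranteeing the
 * remote coverage region is closed on every path out of the body,
 * including the continue statements above.
 */
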
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        int res;

                        if (hci_req_status_pend(hdev))
                                hci_dev_set_flag(hdev, HCI_CMD_PENDING);
                        atomic_dec(&hdev->cmd_cnt);

                        res = hci_send_frame(hdev, skb);
                        if (res < 0)
                                __hci_cmd_sync_cancel(hdev, -res);

                        if (test_bit(HCI_RESET, &hdev->flags) ||
                            hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
                                cancel_delayed_work(&hdev->cmd_timer);
                        else
                                schedule_delayed_work(&hdev->cmd_timer,
                                                      HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}
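/* Command flow control: cmd_cnt mirrors the controller's advertised
 * window (the Num_HCI_Command_Packets field of Command Complete /
 * Command Status events, typically 1), so at most that many commands
 * are ever in flight; the event handlers bump cmd_cnt back up and
 * re-queue this work when the controller is ready for more.
 */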