Linux 6.10-rc3
[linux-block.git] / net / bluetooth / hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
45 #include "smp.h"
46 #include "leds.h"
47 #include "msft.h"
48 #include "aosp.h"
49 #include "hci_codec.h"
50
51 static void hci_rx_work(struct work_struct *work);
52 static void hci_cmd_work(struct work_struct *work);
53 static void hci_tx_work(struct work_struct *work);
54
55 /* HCI device list */
56 LIST_HEAD(hci_dev_list);
57 DEFINE_RWLOCK(hci_dev_list_lock);
58
59 /* HCI callback list */
60 LIST_HEAD(hci_cb_list);
61 DEFINE_MUTEX(hci_cb_list_lock);
62
63 /* HCI ID Numbering */
64 static DEFINE_IDA(hci_index_ida);
65
66 static int hci_scan_req(struct hci_request *req, unsigned long opt)
67 {
68         __u8 scan = opt;
69
70         BT_DBG("%s %x", req->hdev->name, scan);
71
72         /* Inquiry and Page scans */
73         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
74         return 0;
75 }
76
77 static int hci_auth_req(struct hci_request *req, unsigned long opt)
78 {
79         __u8 auth = opt;
80
81         BT_DBG("%s %x", req->hdev->name, auth);
82
83         /* Authentication */
84         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
85         return 0;
86 }
87
88 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
89 {
90         __u8 encrypt = opt;
91
92         BT_DBG("%s %x", req->hdev->name, encrypt);
93
94         /* Encryption */
95         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
96         return 0;
97 }
98
99 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
100 {
101         __le16 policy = cpu_to_le16(opt);
102
103         BT_DBG("%s %x", req->hdev->name, policy);
104
105         /* Default link policy */
106         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
107         return 0;
108 }
109
110 /* Get HCI device by index.
111  * Device is held on return. */
112 struct hci_dev *hci_dev_get(int index)
113 {
114         struct hci_dev *hdev = NULL, *d;
115
116         BT_DBG("%d", index);
117
118         if (index < 0)
119                 return NULL;
120
121         read_lock(&hci_dev_list_lock);
122         list_for_each_entry(d, &hci_dev_list, list) {
123                 if (d->id == index) {
124                         hdev = hci_dev_hold(d);
125                         break;
126                 }
127         }
128         read_unlock(&hci_dev_list_lock);
129         return hdev;
130 }
131
132 /* ---- Inquiry support ---- */
133
134 bool hci_discovery_active(struct hci_dev *hdev)
135 {
136         struct discovery_state *discov = &hdev->discovery;
137
138         switch (discov->state) {
139         case DISCOVERY_FINDING:
140         case DISCOVERY_RESOLVING:
141                 return true;
142
143         default:
144                 return false;
145         }
146 }
147
/* Transition the discovery state machine and emit the matching mgmt
 * Discovering events so userspace stays in sync with the controller.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No-op transitions generate no events */
	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Discovery no longer owns the scanner; let passive
		 * scanning resume if configured.
		 */
		hci_update_passive_scan(hdev);

		/* STARTING -> STOPPED means discovery never actually
		 * began, so "discovering" was never signalled either.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
177
/* Discard all cached inquiry results.
 *
 * Every entry is linked on ->all, so freeing that list releases all
 * the memory; ->unknown and ->resolve only hold secondary links into
 * the same entries and are simply re-initialized to empty.
 *
 * Callers in this file hold hdev->lock while flushing.
 */
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
191
192 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
193                                                bdaddr_t *bdaddr)
194 {
195         struct discovery_state *cache = &hdev->discovery;
196         struct inquiry_entry *e;
197
198         BT_DBG("cache %p, %pMR", cache, bdaddr);
199
200         list_for_each_entry(e, &cache->all, all) {
201                 if (!bacmp(&e->data.bdaddr, bdaddr))
202                         return e;
203         }
204
205         return NULL;
206 }
207
208 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
209                                                        bdaddr_t *bdaddr)
210 {
211         struct discovery_state *cache = &hdev->discovery;
212         struct inquiry_entry *e;
213
214         BT_DBG("cache %p, %pMR", cache, bdaddr);
215
216         list_for_each_entry(e, &cache->unknown, list) {
217                 if (!bacmp(&e->data.bdaddr, bdaddr))
218                         return e;
219         }
220
221         return NULL;
222 }
223
/* Look up an entry on the name-resolve list.
 *
 * With @bdaddr == BDADDR_ANY the first entry whose name_state equals
 * @state is returned (wildcard match); otherwise the entry matching
 * @bdaddr exactly. Returns NULL when nothing matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
242
/* Re-position @ie on the resolve list after its RSSI changed.
 *
 * The list is kept ordered so entries with stronger signal (smaller
 * abs(rssi)) come first and get their names resolved sooner; entries
 * whose resolution is already in flight (NAME_PENDING) are skipped
 * past and never displaced.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink, then re-insert at the sorted position */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
261
/* Insert a fresh inquiry result into the cache, or refresh the entry
 * already stored for this address.
 *
 * Returns MGMT_DEV_FOUND_* flags for the Device Found event:
 * CONFIRM_NAME when the remote name still needs confirming/resolving,
 * LEGACY_PAIRING when the device lacks SSP support.
 *
 * Callers hold hdev->lock.
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A new inquiry response invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* RSSI changes re-sort the resolve list so stronger
		 * signals get their names resolved first.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		/* Out of memory: report the device without caching it */
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: unlink the entry from the secondary
	 * (unknown/resolve) list it was on. NAME_PENDING entries stay
	 * put until the in-flight resolution completes.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
323
324 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
325 {
326         struct discovery_state *cache = &hdev->discovery;
327         struct inquiry_info *info = (struct inquiry_info *) buf;
328         struct inquiry_entry *e;
329         int copied = 0;
330
331         list_for_each_entry(e, &cache->all, all) {
332                 struct inquiry_data *data = &e->data;
333
334                 if (copied >= num)
335                         break;
336
337                 bacpy(&info->bdaddr, &data->bdaddr);
338                 info->pscan_rep_mode    = data->pscan_rep_mode;
339                 info->pscan_period_mode = data->pscan_period_mode;
340                 info->pscan_mode        = data->pscan_mode;
341                 memcpy(info->dev_class, data->dev_class, 3);
342                 info->clock_offset      = data->clock_offset;
343
344                 info++;
345                 copied++;
346         }
347
348         BT_DBG("cache %p, copied %d", cache, copied);
349         return copied;
350 }
351
352 static int hci_inq_req(struct hci_request *req, unsigned long opt)
353 {
354         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
355         struct hci_dev *hdev = req->hdev;
356         struct hci_cp_inquiry cp;
357
358         BT_DBG("%s", hdev->name);
359
360         if (test_bit(HCI_INQUIRY, &hdev->flags))
361                 return 0;
362
363         /* Start Inquiry */
364         memcpy(&cp.lap, &ir->lap, 3);
365         cp.length  = ir->length;
366         cp.num_rsp = ir->num_rsp;
367         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
368
369         return 0;
370 }
371
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry, then copy
 * the cached results back to userspace.
 *
 * @arg: userspace pointer to a struct hci_inquiry_req immediately
 * followed by buffer space for the returned inquiry_info records.
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Device is owned exclusively by a user-channel socket */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Inquiry is a BR/EDR procedure on primary controllers only */
	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	/* Only start a new inquiry when the cache is stale or empty, or
	 * when the caller explicitly asked for a flush.
	 */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* 2s of timeout per requested length unit (the on-air inquiry
	 * unit is 1.28s per the Bluetooth spec, so this leaves slack).
	 */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Copy back the updated request header, then the results */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
474
475 static int hci_dev_do_open(struct hci_dev *hdev)
476 {
477         int ret = 0;
478
479         BT_DBG("%s %p", hdev->name, hdev);
480
481         hci_req_sync_lock(hdev);
482
483         ret = hci_dev_open_sync(hdev);
484
485         hci_req_sync_unlock(hdev);
486         return ret;
487 }
488
489 /* ---- HCI ioctl helpers ---- */
490
/* HCIDEVUP ioctl: power on the device with index @dev.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
545
546 int hci_dev_do_close(struct hci_dev *hdev)
547 {
548         int err;
549
550         BT_DBG("%s %p", hdev->name, hdev);
551
552         hci_req_sync_lock(hdev);
553
554         err = hci_dev_close_sync(hdev);
555
556         hci_req_sync_unlock(hdev);
557
558         return err;
559 }
560
561 int hci_dev_close(__u16 dev)
562 {
563         struct hci_dev *hdev;
564         int err;
565
566         hdev = hci_dev_get(dev);
567         if (!hdev)
568                 return -ENODEV;
569
570         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
571                 err = -EBUSY;
572                 goto done;
573         }
574
575         cancel_work_sync(&hdev->power_on);
576         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
577                 cancel_delayed_work(&hdev->power_off);
578
579         err = hci_dev_do_close(hdev);
580
581 done:
582         hci_dev_put(hdev);
583         return err;
584 }
585
/* Reset the controller with the request-sync lock held: purge queued
 * packets, quiesce the command timers and workqueue, flush cached
 * state, then issue the actual reset via hci_reset_sync().
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Cancel these to avoid queueing non-chained pending work */
	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
	/* Wait for
	 *
	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
	 *
	 * inside RCU section to see the flag or complete scheduling.
	 */
	synchronize_rcu();
	/* Explicitly cancel works in case scheduled after setting the flag. */
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to drop its own queues */
	if (hdev->flush)
		hdev->flush(hdev);

	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	/* Restore command slot and per-link-type packet credit counters
	 * to their post-reset defaults.
	 */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
	hdev->iso_cnt = 0;

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}
638
639 int hci_dev_reset(__u16 dev)
640 {
641         struct hci_dev *hdev;
642         int err;
643
644         hdev = hci_dev_get(dev);
645         if (!hdev)
646                 return -ENODEV;
647
648         if (!test_bit(HCI_UP, &hdev->flags)) {
649                 err = -ENETDOWN;
650                 goto done;
651         }
652
653         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
654                 err = -EBUSY;
655                 goto done;
656         }
657
658         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
659                 err = -EOPNOTSUPP;
660                 goto done;
661         }
662
663         err = hci_dev_do_reset(hdev);
664
665 done:
666         hci_dev_put(hdev);
667         return err;
668 }
669
670 int hci_dev_reset_stat(__u16 dev)
671 {
672         struct hci_dev *hdev;
673         int ret = 0;
674
675         hdev = hci_dev_get(dev);
676         if (!hdev)
677                 return -ENODEV;
678
679         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
680                 ret = -EBUSY;
681                 goto done;
682         }
683
684         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
685                 ret = -EOPNOTSUPP;
686                 goto done;
687         }
688
689         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
690
691 done:
692         hci_dev_put(hdev);
693         return ret;
694 }
695
/* Mirror a scan mode written via the legacy HCISETSCAN ioctl into the
 * HCI_CONNECTABLE/HCI_DISCOVERABLE flags and, when the device is
 * mgmt-managed, notify userspace of the changed settings.
 */
static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	/* Page scan maps to connectable */
	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	/* Inquiry scan maps to discoverable */
	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		/* Limited discoverable cannot outlive discoverable */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	/* Only mgmt-managed devices need the notifications below */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}
731
/* Legacy HCISET* ioctl dispatcher: validate the device's state, then
 * apply the requested setting from the hci_dev_req at @arg.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Device is owned exclusively by a user-channel socket */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* These settings are BR/EDR-only, primary controller only */
	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values: word 0 = packet count,
		 * word 1 = MTU. NOTE(review): this word-indexed unpacking
		 * is layout/endianness-dependent — it is the historical
		 * ioctl ABI and must not be "fixed".
		 */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packed layout as HCISETACLMTU, for SCO */
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
837
/* HCIGETDEVLIST ioctl: return the ids and flags of up to dev_num
 * registered controllers. @arg points to a struct hci_dev_list_req
 * whose dev_num field is read first. Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel allocation stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
887
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the requested
 * device and copy it back to userspace. Returns 0 or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strscpy(di.name, hdev->name, sizeof(di.name));
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	/* BR/EDR controllers report their ACL/SCO limits; LE-only ones
	 * expose the LE buffer parameters through the acl fields.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
940
941 /* ---- Interface to HCI drivers ---- */
942
943 static int hci_dev_do_poweroff(struct hci_dev *hdev)
944 {
945         int err;
946
947         BT_DBG("%s %p", hdev->name, hdev);
948
949         hci_req_sync_lock(hdev);
950
951         err = hci_set_powered_sync(hdev, false);
952
953         hci_req_sync_unlock(hdev);
954
955         return err;
956 }
957
/* rfkill ->set_block callback: record the radio-block state in the
 * HCI_RFKILLED flag and power a running controller down when blocked.
 *
 * Returns 0, or -EBUSY when the device is bound to a user channel
 * (the user-channel owner manages the device state itself).
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;
	int err;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	/* No state change, nothing to do */
	if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
		return 0;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);

		/* During setup/config the power-on path checks
		 * HCI_RFKILLED itself, so only power off a device that
		 * is already past those phases.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
			err = hci_dev_do_poweroff(hdev);
			if (err) {
				bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
					   err);

				/* Make sure the device is still closed even if
				 * anything during power off sequence (eg.
				 * disconnecting devices) failed.
				 */
				hci_dev_do_close(hdev);
			}
		}
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}
994
/* rfkill operations - only the block/unblock callback is needed. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
998
/* Deferred power-on work. Either completes a mgmt-driven power-up of an
 * already-open device, or opens the device and then re-checks the error
 * conditions (rfkill, unconfigured, missing address) that are deliberately
 * ignored during the setup phase.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	/* Device was auto-powered on and is now taken over by mgmt: cancel
	 * the pending auto power-off and just sync the powered state.
	 */
	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
1068
1069 static void hci_power_off(struct work_struct *work)
1070 {
1071         struct hci_dev *hdev = container_of(work, struct hci_dev,
1072                                             power_off.work);
1073
1074         BT_DBG("%s", hdev->name);
1075
1076         hci_dev_do_close(hdev);
1077 }
1078
/* Deferred hardware-error handling: give the driver a chance to react to
 * the error itself, then restart the controller by closing and
 * re-opening it.
 */
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	/* Pin the device so it cannot be freed mid-reset. */
	hci_dev_hold(hdev);
	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	/* Only attempt the re-open if the close succeeded. */
	if (!hci_dev_do_close(hdev))
		hci_dev_do_open(hdev);

	hci_dev_put(hdev);
}
1096
1097 void hci_uuids_clear(struct hci_dev *hdev)
1098 {
1099         struct bt_uuid *uuid, *tmp;
1100
1101         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1102                 list_del(&uuid->list);
1103                 kfree(uuid);
1104         }
1105 }
1106
1107 void hci_link_keys_clear(struct hci_dev *hdev)
1108 {
1109         struct link_key *key, *tmp;
1110
1111         list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1112                 list_del_rcu(&key->list);
1113                 kfree_rcu(key, rcu);
1114         }
1115 }
1116
1117 void hci_smp_ltks_clear(struct hci_dev *hdev)
1118 {
1119         struct smp_ltk *k, *tmp;
1120
1121         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1122                 list_del_rcu(&k->list);
1123                 kfree_rcu(k, rcu);
1124         }
1125 }
1126
1127 void hci_smp_irks_clear(struct hci_dev *hdev)
1128 {
1129         struct smp_irk *k, *tmp;
1130
1131         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1132                 list_del_rcu(&k->list);
1133                 kfree_rcu(k, rcu);
1134         }
1135 }
1136
1137 void hci_blocked_keys_clear(struct hci_dev *hdev)
1138 {
1139         struct blocked_key *b, *tmp;
1140
1141         list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1142                 list_del_rcu(&b->list);
1143                 kfree_rcu(b, rcu);
1144         }
1145 }
1146
1147 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1148 {
1149         bool blocked = false;
1150         struct blocked_key *b;
1151
1152         rcu_read_lock();
1153         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1154                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1155                         blocked = true;
1156                         break;
1157                 }
1158         }
1159
1160         rcu_read_unlock();
1161         return blocked;
1162 }
1163
/* Look up the stored BR/EDR link key for @bdaddr.
 *
 * Returns the key, or NULL if there is none or the matching key is on
 * the blocked-key list. NOTE(review): the RCU read lock is dropped
 * before the blocked-key check and before returning the entry — the
 * pattern matches the other key lookups in this file, but the caller
 * must not assume RCU protection of the returned pointer.
 */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			/* Never hand out a key that has been explicitly
			 * blocked.
			 */
			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
1189
/* Decide whether a BR/EDR link key should be stored persistently,
 * based on the key type, the previous key type and both sides'
 * authentication requirements. The checks are order-dependent: the
 * early key-type tests take precedence over the per-connection ones.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1229
1230 static u8 ltk_role(u8 type)
1231 {
1232         if (type == SMP_LTK)
1233                 return HCI_ROLE_MASTER;
1234
1235         return HCI_ROLE_SLAVE;
1236 }
1237
/* Look up an LE Long Term Key by address, address type and role.
 *
 * Secure Connections LTKs are valid for both roles; other LTKs only
 * match the role they were distributed for (see ltk_role()). Returns
 * NULL when nothing matches or the matching key is blocked.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			/* Never hand out a key that has been blocked. */
			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
1266
/* Resolve a Resolvable Private Address to the matching IRK.
 *
 * Fast path: an IRK whose cached ->rpa already equals @rpa. Slow path:
 * run smp_irk_matches() against every stored IRK and cache the RPA on
 * the entry that resolves it. Returns NULL if the RPA cannot be
 * resolved or the matching key is blocked.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			/* Cache the resolved RPA for the fast path above. */
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
1300
/* Look up an IRK by the peer's identity address and address type.
 *
 * Returns NULL for random addresses that cannot be identity addresses
 * (i.e. not static random), when no IRK is stored, or when the stored
 * IRK is on the blocked-key list.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:

	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
1333
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * When @persistent is non-NULL it is set to whether the key should be
 * stored permanently (see hci_persistent_key()). Returns the stored
 * key, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key only updates the value; keep the
	 * original key type so persistence is judged on the real type.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
1380
1381 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1382                             u8 addr_type, u8 type, u8 authenticated,
1383                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1384 {
1385         struct smp_ltk *key, *old_key;
1386         u8 role = ltk_role(type);
1387
1388         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1389         if (old_key)
1390                 key = old_key;
1391         else {
1392                 key = kzalloc(sizeof(*key), GFP_KERNEL);
1393                 if (!key)
1394                         return NULL;
1395                 list_add_rcu(&key->list, &hdev->long_term_keys);
1396         }
1397
1398         bacpy(&key->bdaddr, bdaddr);
1399         key->bdaddr_type = addr_type;
1400         memcpy(key->val, tk, sizeof(key->val));
1401         key->authenticated = authenticated;
1402         key->ediv = ediv;
1403         key->rand = rand;
1404         key->enc_size = enc_size;
1405         key->type = type;
1406
1407         return key;
1408 }
1409
1410 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1411                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
1412 {
1413         struct smp_irk *irk;
1414
1415         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1416         if (!irk) {
1417                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1418                 if (!irk)
1419                         return NULL;
1420
1421                 bacpy(&irk->bdaddr, bdaddr);
1422                 irk->addr_type = addr_type;
1423
1424                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1425         }
1426
1427         memcpy(irk->val, val, 16);
1428         bacpy(&irk->rpa, rpa);
1429
1430         return irk;
1431 }
1432
1433 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1434 {
1435         struct link_key *key;
1436
1437         key = hci_find_link_key(hdev, bdaddr);
1438         if (!key)
1439                 return -ENOENT;
1440
1441         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1442
1443         list_del_rcu(&key->list);
1444         kfree_rcu(key, rcu);
1445
1446         return 0;
1447 }
1448
1449 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1450 {
1451         struct smp_ltk *k, *tmp;
1452         int removed = 0;
1453
1454         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1455                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1456                         continue;
1457
1458                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1459
1460                 list_del_rcu(&k->list);
1461                 kfree_rcu(k, rcu);
1462                 removed++;
1463         }
1464
1465         return removed ? 0 : -ENOENT;
1466 }
1467
1468 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1469 {
1470         struct smp_irk *k, *tmp;
1471
1472         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1473                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1474                         continue;
1475
1476                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1477
1478                 list_del_rcu(&k->list);
1479                 kfree_rcu(k, rcu);
1480         }
1481 }
1482
1483 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1484 {
1485         struct smp_ltk *k;
1486         struct smp_irk *irk;
1487         u8 addr_type;
1488
1489         if (type == BDADDR_BREDR) {
1490                 if (hci_find_link_key(hdev, bdaddr))
1491                         return true;
1492                 return false;
1493         }
1494
1495         /* Convert to HCI addr type which struct smp_ltk uses */
1496         if (type == BDADDR_LE_PUBLIC)
1497                 addr_type = ADDR_LE_DEV_PUBLIC;
1498         else
1499                 addr_type = ADDR_LE_DEV_RANDOM;
1500
1501         irk = hci_get_irk(hdev, bdaddr, addr_type);
1502         if (irk) {
1503                 bdaddr = &irk->bdaddr;
1504                 addr_type = irk->addr_type;
1505         }
1506
1507         rcu_read_lock();
1508         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1509                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1510                         rcu_read_unlock();
1511                         return true;
1512                 }
1513         }
1514         rcu_read_unlock();
1515
1516         return false;
1517 }
1518
1519 /* HCI command timer function */
/* Fires when a sent HCI command did not complete in time. Cancels any
 * pending synchronous request, lets the driver attempt recovery and
 * unblocks the command queue.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	/* If a request is in flight, report its opcode and cancel it with
	 * ETIMEDOUT so any synchronous waiter is woken up.
	 */
	if (hdev->req_skb) {
		u16 opcode = hci_skb_opcode(hdev->req_skb);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);

		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	/* Give the driver a chance to perform its own recovery. */
	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	/* Allow the next queued command to be transmitted. */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1541
1542 /* HCI ncmd timer function */
/* Fires when the controller has reported ncmd = 0 (no command credits)
 * for too long; outside of init this is treated as fatal and the device
 * is reset.
 */
static void hci_ncmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During HCI_INIT phase no events can be injected if the ncmd timer
	 * triggers since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* This is an irrecoverable state, inject hardware error event */
	hci_reset_dev(hdev);
}
1559
1560 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1561                                           bdaddr_t *bdaddr, u8 bdaddr_type)
1562 {
1563         struct oob_data *data;
1564
1565         list_for_each_entry(data, &hdev->remote_oob_data, list) {
1566                 if (bacmp(bdaddr, &data->bdaddr) != 0)
1567                         continue;
1568                 if (data->bdaddr_type != bdaddr_type)
1569                         continue;
1570                 return data;
1571         }
1572
1573         return NULL;
1574 }
1575
1576 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1577                                u8 bdaddr_type)
1578 {
1579         struct oob_data *data;
1580
1581         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1582         if (!data)
1583                 return -ENOENT;
1584
1585         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1586
1587         list_del(&data->list);
1588         kfree(data);
1589
1590         return 0;
1591 }
1592
1593 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1594 {
1595         struct oob_data *data, *n;
1596
1597         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1598                 list_del(&data->list);
1599                 kfree(data);
1600         }
1601 }
1602
/* Store remote Out-Of-Band pairing data for @bdaddr.
 *
 * The ->present field encodes which value sets were provided:
 * 0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both, 0x00 = neither.
 * Absent value sets are zeroed. Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		/* Only P-192 values were provided. */
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
1648
1649 /* This function requires the caller holds hdev->lock */
1650 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1651 {
1652         struct adv_info *adv_instance;
1653
1654         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1655                 if (adv_instance->instance == instance)
1656                         return adv_instance;
1657         }
1658
1659         return NULL;
1660 }
1661
1662 /* This function requires the caller holds hdev->lock */
1663 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1664 {
1665         struct adv_info *cur_instance;
1666
1667         cur_instance = hci_find_adv_instance(hdev, instance);
1668         if (!cur_instance)
1669                 return NULL;
1670
1671         if (cur_instance == list_last_entry(&hdev->adv_instances,
1672                                             struct adv_info, list))
1673                 return list_first_entry(&hdev->adv_instances,
1674                                                  struct adv_info, list);
1675         else
1676                 return list_next_entry(cur_instance, list);
1677 }
1678
1679 /* This function requires the caller holds hdev->lock */
1680 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1681 {
1682         struct adv_info *adv_instance;
1683
1684         adv_instance = hci_find_adv_instance(hdev, instance);
1685         if (!adv_instance)
1686                 return -ENOENT;
1687
1688         BT_DBG("%s removing %dMR", hdev->name, instance);
1689
1690         if (hdev->cur_adv_instance == instance) {
1691                 if (hdev->adv_instance_timeout) {
1692                         cancel_delayed_work(&hdev->adv_instance_expire);
1693                         hdev->adv_instance_timeout = 0;
1694                 }
1695                 hdev->cur_adv_instance = 0x00;
1696         }
1697
1698         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1699
1700         list_del(&adv_instance->list);
1701         kfree(adv_instance);
1702
1703         hdev->adv_instance_cnt--;
1704
1705         return 0;
1706 }
1707
1708 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1709 {
1710         struct adv_info *adv_instance, *n;
1711
1712         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1713                 adv_instance->rpa_expired = rpa_expired;
1714 }
1715
1716 /* This function requires the caller holds hdev->lock */
1717 void hci_adv_instances_clear(struct hci_dev *hdev)
1718 {
1719         struct adv_info *adv_instance, *n;
1720
1721         if (hdev->adv_instance_timeout) {
1722                 cancel_delayed_work(&hdev->adv_instance_expire);
1723                 hdev->adv_instance_timeout = 0;
1724         }
1725
1726         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1727                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1728                 list_del(&adv_instance->list);
1729                 kfree(adv_instance);
1730         }
1731
1732         hdev->adv_instance_cnt = 0;
1733         hdev->cur_adv_instance = 0x00;
1734 }
1735
1736 static void adv_instance_rpa_expired(struct work_struct *work)
1737 {
1738         struct adv_info *adv_instance = container_of(work, struct adv_info,
1739                                                      rpa_expired_cb.work);
1740
1741         BT_DBG("");
1742
1743         adv_instance->rpa_expired = true;
1744 }
1745
1746 /* This function requires the caller holds hdev->lock */
1747 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1748                                       u32 flags, u16 adv_data_len, u8 *adv_data,
1749                                       u16 scan_rsp_len, u8 *scan_rsp_data,
1750                                       u16 timeout, u16 duration, s8 tx_power,
1751                                       u32 min_interval, u32 max_interval,
1752                                       u8 mesh_handle)
1753 {
1754         struct adv_info *adv;
1755
1756         adv = hci_find_adv_instance(hdev, instance);
1757         if (adv) {
1758                 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1759                 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1760                 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1761         } else {
1762                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1763                     instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1764                         return ERR_PTR(-EOVERFLOW);
1765
1766                 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1767                 if (!adv)
1768                         return ERR_PTR(-ENOMEM);
1769
1770                 adv->pending = true;
1771                 adv->instance = instance;
1772                 list_add(&adv->list, &hdev->adv_instances);
1773                 hdev->adv_instance_cnt++;
1774         }
1775
1776         adv->flags = flags;
1777         adv->min_interval = min_interval;
1778         adv->max_interval = max_interval;
1779         adv->tx_power = tx_power;
1780         /* Defining a mesh_handle changes the timing units to ms,
1781          * rather than seconds, and ties the instance to the requested
1782          * mesh_tx queue.
1783          */
1784         adv->mesh = mesh_handle;
1785
1786         hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1787                                   scan_rsp_len, scan_rsp_data);
1788
1789         adv->timeout = timeout;
1790         adv->remaining_time = timeout;
1791
1792         if (duration == 0)
1793                 adv->duration = hdev->def_multi_adv_rotation_duration;
1794         else
1795                 adv->duration = duration;
1796
1797         INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1798
1799         BT_DBG("%s for %dMR", hdev->name, instance);
1800
1801         return adv;
1802 }
1803
1804 /* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u8 data_len, u8 *data,
				      u32 min_interval, u32 max_interval)
{
	struct adv_info *adv;

	/* Create the base advertising instance with no AD/scan response
	 * payload, no timeout and no TX power preference, then mark it as
	 * periodic and attach the periodic advertising data.
	 */
	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
				   min_interval, max_interval, 0);
	if (IS_ERR(adv))
		return adv;

	adv->periodic = true;
	adv->per_adv_data_len = data_len;

	if (data)
		memcpy(adv->per_adv_data, data, data_len);

	return adv;
}
1825
1826 /* This function requires the caller holds hdev->lock */
/* Update the advertising data and/or scan response data of an existing
 * instance. Data is only copied (and the corresponding *_changed flag
 * set) when it actually differs from what is stored. Returns 0 or
 * -ENOENT when the instance does not exist.
 */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);

	/* If advertisement doesn't exist, we can't modify its data */
	if (!adv)
		return -ENOENT;

	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memcpy(adv->adv_data, adv_data, adv_data_len);
		adv->adv_data_len = adv_data_len;
		adv->adv_data_changed = true;
	}

	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
		adv->scan_rsp_len = scan_rsp_len;
		adv->scan_rsp_changed = true;
	}

	/* Mark as changed if there are flags which would affect it */
	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		adv->scan_rsp_changed = true;

	return 0;
}
1860
1861 /* This function requires the caller holds hdev->lock */
1862 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1863 {
1864         u32 flags;
1865         struct adv_info *adv;
1866
1867         if (instance == 0x00) {
1868                 /* Instance 0 always manages the "Tx Power" and "Flags"
1869                  * fields
1870                  */
1871                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1872
1873                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1874                  * corresponds to the "connectable" instance flag.
1875                  */
1876                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1877                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
1878
1879                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1880                         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1881                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1882                         flags |= MGMT_ADV_FLAG_DISCOV;
1883
1884                 return flags;
1885         }
1886
1887         adv = hci_find_adv_instance(hdev, instance);
1888
1889         /* Return 0 when we got an invalid instance identifier. */
1890         if (!adv)
1891                 return 0;
1892
1893         return adv->flags;
1894 }
1895
1896 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1897 {
1898         struct adv_info *adv;
1899
1900         /* Instance 0x00 always set local name */
1901         if (instance == 0x00)
1902                 return true;
1903
1904         adv = hci_find_adv_instance(hdev, instance);
1905         if (!adv)
1906                 return false;
1907
1908         if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1909             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1910                 return true;
1911
1912         return adv->scan_rsp_len ? true : false;
1913 }
1914
/* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int handle;

	/* Free every registered monitor, then tear down the IDR itself */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		hci_free_adv_monitor(hdev, monitor);

	idr_destroy(&hdev->adv_monitors_idr);
}
1926
/* Frees the monitor structure and do some bookkeepings.
 * This function requires the caller holds hdev->lock.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	struct adv_pattern *pattern;
	struct adv_pattern *tmp;

	/* Tolerate NULL so callers need not check */
	if (!monitor)
		return;

	/* Release every pattern attached to this monitor */
	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);
		kfree(pattern);
	}

	/* A zero handle means the monitor never made it into the IDR */
	if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);

	/* Only monitors that reached a registered state were counted and
	 * announced; undo both here.
	 */
	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
		hdev->adv_monitors_cnt--;
		mgmt_adv_monitor_removed(hdev, monitor->handle);
	}

	kfree(monitor);
}
1953
/* Assigns handle to a monitor, and if offloading is supported and power is on,
 * also attempts to forward the request to the controller.
 * This function requires the caller holds hci_req_sync_lock.
 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	int min, max, handle;
	int status = 0;

	if (!monitor)
		return -EINVAL;

	/* hdev->lock only guards the IDR insertion */
	hci_dev_lock(hdev);

	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);

	hci_dev_unlock(hdev);

	/* idr_alloc() returns a negative errno on failure */
	if (handle < 0)
		return handle;

	monitor->handle = handle;

	/* Nothing to forward while the controller is powered off */
	if (!hdev_is_powered(hdev))
		return status;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		bt_dev_dbg(hdev, "add monitor %d status %d",
			   monitor->handle, status);
		/* Message was not forwarded to controller - not an error */
		break;

	case HCI_ADV_MONITOR_EXT_MSFT:
		status = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "add monitor %d msft status %d",
			   handle, status);
		break;
	}

	return status;
}
1999
/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * This function requires the caller holds hci_req_sync_lock.
 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
				  struct adv_monitor *monitor)
{
	int status = 0;
	int handle;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		bt_dev_dbg(hdev, "remove monitor %d status %d",
			   monitor->handle, status);
		goto free_monitor;

	case HCI_ADV_MONITOR_EXT_MSFT:
		/* Save the handle for logging after the remove call */
		handle = monitor->handle;
		status = msft_remove_monitor(hdev, monitor);
		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
			   handle, status);
		break;
	}

	/* In case no matching handle registered, just free the monitor */
	if (status == -ENOENT)
		goto free_monitor;

	return status;

free_monitor:
	if (status == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	return status;
}
2038
2039 /* This function requires the caller holds hci_req_sync_lock */
2040 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2041 {
2042         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2043
2044         if (!monitor)
2045                 return -EINVAL;
2046
2047         return hci_remove_adv_monitor(hdev, monitor);
2048 }
2049
2050 /* This function requires the caller holds hci_req_sync_lock */
2051 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2052 {
2053         struct adv_monitor *monitor;
2054         int idr_next_id = 0;
2055         int status = 0;
2056
2057         while (1) {
2058                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2059                 if (!monitor)
2060                         break;
2061
2062                 status = hci_remove_adv_monitor(hdev, monitor);
2063                 if (status)
2064                         return status;
2065
2066                 idr_next_id++;
2067         }
2068
2069         return status;
2070 }
2071
/* This function requires the caller holds hdev->lock */
bool hci_is_adv_monitoring(struct hci_dev *hdev)
{
	/* Monitoring is considered active while any monitor is registered */
	return !idr_is_empty(&hdev->adv_monitors_idr);
}
2077
2078 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2079 {
2080         if (msft_monitor_supported(hdev))
2081                 return HCI_ADV_MONITOR_EXT_MSFT;
2082
2083         return HCI_ADV_MONITOR_EXT_NONE;
2084 }
2085
2086 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2087                                          bdaddr_t *bdaddr, u8 type)
2088 {
2089         struct bdaddr_list *b;
2090
2091         list_for_each_entry(b, bdaddr_list, list) {
2092                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2093                         return b;
2094         }
2095
2096         return NULL;
2097 }
2098
2099 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2100                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2101                                 u8 type)
2102 {
2103         struct bdaddr_list_with_irk *b;
2104
2105         list_for_each_entry(b, bdaddr_list, list) {
2106                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2107                         return b;
2108         }
2109
2110         return NULL;
2111 }
2112
2113 struct bdaddr_list_with_flags *
2114 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2115                                   bdaddr_t *bdaddr, u8 type)
2116 {
2117         struct bdaddr_list_with_flags *b;
2118
2119         list_for_each_entry(b, bdaddr_list, list) {
2120                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2121                         return b;
2122         }
2123
2124         return NULL;
2125 }
2126
2127 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2128 {
2129         struct bdaddr_list *b, *n;
2130
2131         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2132                 list_del(&b->list);
2133                 kfree(b);
2134         }
2135 }
2136
2137 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2138 {
2139         struct bdaddr_list *entry;
2140
2141         if (!bacmp(bdaddr, BDADDR_ANY))
2142                 return -EBADF;
2143
2144         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2145                 return -EEXIST;
2146
2147         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2148         if (!entry)
2149                 return -ENOMEM;
2150
2151         bacpy(&entry->bdaddr, bdaddr);
2152         entry->bdaddr_type = type;
2153
2154         list_add(&entry->list, list);
2155
2156         return 0;
2157 }
2158
2159 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2160                                         u8 type, u8 *peer_irk, u8 *local_irk)
2161 {
2162         struct bdaddr_list_with_irk *entry;
2163
2164         if (!bacmp(bdaddr, BDADDR_ANY))
2165                 return -EBADF;
2166
2167         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2168                 return -EEXIST;
2169
2170         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2171         if (!entry)
2172                 return -ENOMEM;
2173
2174         bacpy(&entry->bdaddr, bdaddr);
2175         entry->bdaddr_type = type;
2176
2177         if (peer_irk)
2178                 memcpy(entry->peer_irk, peer_irk, 16);
2179
2180         if (local_irk)
2181                 memcpy(entry->local_irk, local_irk, 16);
2182
2183         list_add(&entry->list, list);
2184
2185         return 0;
2186 }
2187
2188 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2189                                    u8 type, u32 flags)
2190 {
2191         struct bdaddr_list_with_flags *entry;
2192
2193         if (!bacmp(bdaddr, BDADDR_ANY))
2194                 return -EBADF;
2195
2196         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2197                 return -EEXIST;
2198
2199         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2200         if (!entry)
2201                 return -ENOMEM;
2202
2203         bacpy(&entry->bdaddr, bdaddr);
2204         entry->bdaddr_type = type;
2205         entry->flags = flags;
2206
2207         list_add(&entry->list, list);
2208
2209         return 0;
2210 }
2211
2212 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2213 {
2214         struct bdaddr_list *entry;
2215
2216         if (!bacmp(bdaddr, BDADDR_ANY)) {
2217                 hci_bdaddr_list_clear(list);
2218                 return 0;
2219         }
2220
2221         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2222         if (!entry)
2223                 return -ENOENT;
2224
2225         list_del(&entry->list);
2226         kfree(entry);
2227
2228         return 0;
2229 }
2230
2231 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2232                                                         u8 type)
2233 {
2234         struct bdaddr_list_with_irk *entry;
2235
2236         if (!bacmp(bdaddr, BDADDR_ANY)) {
2237                 hci_bdaddr_list_clear(list);
2238                 return 0;
2239         }
2240
2241         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2242         if (!entry)
2243                 return -ENOENT;
2244
2245         list_del(&entry->list);
2246         kfree(entry);
2247
2248         return 0;
2249 }
2250
2251 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2252                                    u8 type)
2253 {
2254         struct bdaddr_list_with_flags *entry;
2255
2256         if (!bacmp(bdaddr, BDADDR_ANY)) {
2257                 hci_bdaddr_list_clear(list);
2258                 return 0;
2259         }
2260
2261         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2262         if (!entry)
2263                 return -ENOENT;
2264
2265         list_del(&entry->list);
2266         kfree(entry);
2267
2268         return 0;
2269 }
2270
2271 /* This function requires the caller holds hdev->lock */
2272 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2273                                                bdaddr_t *addr, u8 addr_type)
2274 {
2275         struct hci_conn_params *params;
2276
2277         list_for_each_entry(params, &hdev->le_conn_params, list) {
2278                 if (bacmp(&params->addr, addr) == 0 &&
2279                     params->addr_type == addr_type) {
2280                         return params;
2281                 }
2282         }
2283
2284         return NULL;
2285 }
2286
/* This function requires the caller holds hdev->lock or rcu_read_lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	/* Take our own read-side section so the walk is safe against
	 * concurrent list_del_rcu() even if the caller holds neither lock
	 * form itself.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type) {
			rcu_read_unlock();
			return param;
		}
	}

	rcu_read_unlock();

	return NULL;
}
2307
/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_del_init(struct hci_conn_params *param)
{
	/* Nothing to do if not currently linked on a pending list */
	if (list_empty(&param->action))
		return;

	list_del_rcu(&param->action);
	/* Wait for in-flight RCU readers (hci_pend_le_action_lookup) to
	 * finish before the node is reinitialized.
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&param->action);
}
2318
/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_add(struct hci_conn_params *param,
			  struct list_head *list)
{
	/* RCU insertion pairs with the RCU walk in
	 * hci_pend_le_action_lookup().
	 */
	list_add_rcu(&param->action, list);
}
2325
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* Idempotent: reuse an existing entry for this address/type */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		bt_dev_err(hdev, "out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	/* Seed the entry with the controller-wide defaults */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
2358
/* Unlink and free one LE connection-parameters entry, releasing the
 * connection references it may hold.
 */
void hci_conn_params_free(struct hci_conn_params *params)
{
	/* Drop off any pend_le_conns/pend_le_reports list first */
	hci_pend_le_list_del_init(params);

	/* Release the conn references held by this entry */
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->list);
	kfree(params);
}
2371
/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	/* Re-evaluate passive scanning now that the entry is gone */
	hci_update_passive_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
2387
/* Drop all connection-parameter entries whose auto-connect policy is
 * disabled. This function requires the caller holds hdev->lock.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		hci_conn_params_free(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
2410
/* Drop every stored LE connection-parameter entry.
 * This function requires the caller holds hdev->lock.
 */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}
2421
2422 /* Copy the Identity Address of the controller.
2423  *
2424  * If the controller has a public BD_ADDR, then by default use that one.
2425  * If this is a LE only controller without a public address, default to
2426  * the static random address.
2427  *
2428  * For debugging purposes it is possible to force controllers with a
2429  * public address to use the static random address instead.
2430  *
2431  * In case BR/EDR has been disabled on a dual-mode controller and
2432  * userspace has configured a static address, then that address
2433  * becomes the identity address instead of the public BR/EDR address.
2434  */
2435 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2436                                u8 *bdaddr_type)
2437 {
2438         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2439             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2440             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2441              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2442                 bacpy(bdaddr, &hdev->static_addr);
2443                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2444         } else {
2445                 bacpy(bdaddr, &hdev->bdaddr);
2446                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2447         }
2448 }
2449
/* Reset the recorded wake reason/address bookkeeping */
static void hci_clear_wake_reason(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);

	hdev->wake_reason = 0;
	bacpy(&hdev->wake_addr, BDADDR_ANY);
	hdev->wake_addr_type = 0;

	hci_dev_unlock(hdev);
}
2460
/* PM notifier: drive hci_suspend_dev()/hci_resume_dev() around system
 * suspend. Always returns NOTIFY_DONE; failures are only logged.
 */
static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	struct hci_dev *hdev =
		container_of(nb, struct hci_dev, suspend_notifier);
	int ret = 0;

	/* Userspace has full control of this device. Do nothing. */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return NOTIFY_DONE;

	/* To avoid a potential race with hci_unregister_dev. */
	hci_dev_hold(hdev);

	if (action == PM_SUSPEND_PREPARE)
		ret = hci_suspend_dev(hdev);
	else if (action == PM_POST_SUSPEND)
		ret = hci_resume_dev(hdev);

	if (ret)
		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
			   action, ret);

	hci_dev_put(hdev);
	return NOTIFY_DONE;
}
2487
/* Alloc HCI device.
 *
 * Allocates a zeroed struct hci_dev with @sizeof_priv bytes of driver
 * private data appended to the same allocation, and initializes all
 * default parameters, locks, lists, work items and queues. Returns NULL
 * on allocation failure.
 */
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
{
	struct hci_dev *hdev;
	unsigned int alloc_size;

	alloc_size = sizeof(*hdev);
	if (sizeof_priv) {
		/* Fixme: May need ALIGN-ment? */
		alloc_size += sizeof_priv;
	}

	hdev = kzalloc(alloc_size, GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR defaults */
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	/* Advertisement monitor interleave-scan durations */
	hdev->advmon_allowlist_duration = 300;
	hdev->advmon_no_filter_duration = 500;
	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE advertising, scanning and connection defaults */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_scan_int_suspend = 0x0400;
	hdev->le_scan_window_suspend = 0x0012;
	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
	hdev->le_scan_int_adv_monitor = 0x0060;
	hdev->le_scan_window_adv_monitor = 0x0030;
	hdev->le_scan_int_connect = 0x0060;
	hdev->le_scan_window_connect = 0x0060;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
	hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	/* default 1.28 sec page scan */
	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
	hdev->def_page_scan_int = 0x0800;
	hdev->def_page_scan_window = 0x0012;

	/* Locks, ID allocator and the various bookkeeping lists */
	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	ida_init(&hdev->unset_handle_ida);

	INIT_LIST_HEAD(&hdev->mesh_pending);
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->reject_list);
	INIT_LIST_HEAD(&hdev->accept_list);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_accept_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);
	INIT_LIST_HEAD(&hdev->monitored_devices);

	/* Work items that feed the hdev workqueues */
	INIT_LIST_HEAD(&hdev->local_codecs);
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	hci_cmd_sync_init(hdev);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);

	hci_devcd_setup(hdev);
	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev_priv);
2621
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2629
2630 /* Register HCI device */
2631 int hci_register_dev(struct hci_dev *hdev)
2632 {
2633         int id, error;
2634
2635         if (!hdev->open || !hdev->close || !hdev->send)
2636                 return -EINVAL;
2637
2638         /* Do not allow HCI_AMP devices to register at index 0,
2639          * so the index can be used as the AMP controller ID.
2640          */
2641         switch (hdev->dev_type) {
2642         case HCI_PRIMARY:
2643                 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2644                 break;
2645         case HCI_AMP:
2646                 id = ida_alloc_range(&hci_index_ida, 1, HCI_MAX_ID - 1,
2647                                      GFP_KERNEL);
2648                 break;
2649         default:
2650                 return -EINVAL;
2651         }
2652
2653         if (id < 0)
2654                 return id;
2655
2656         error = dev_set_name(&hdev->dev, "hci%u", id);
2657         if (error)
2658                 return error;
2659
2660         hdev->name = dev_name(&hdev->dev);
2661         hdev->id = id;
2662
2663         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2664
2665         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2666         if (!hdev->workqueue) {
2667                 error = -ENOMEM;
2668                 goto err;
2669         }
2670
2671         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2672                                                       hdev->name);
2673         if (!hdev->req_workqueue) {
2674                 destroy_workqueue(hdev->workqueue);
2675                 error = -ENOMEM;
2676                 goto err;
2677         }
2678
2679         if (!IS_ERR_OR_NULL(bt_debugfs))
2680                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2681
2682         error = device_add(&hdev->dev);
2683         if (error < 0)
2684                 goto err_wqueue;
2685
2686         hci_leds_init(hdev);
2687
2688         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2689                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2690                                     hdev);
2691         if (hdev->rfkill) {
2692                 if (rfkill_register(hdev->rfkill) < 0) {
2693                         rfkill_destroy(hdev->rfkill);
2694                         hdev->rfkill = NULL;
2695                 }
2696         }
2697
2698         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2699                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2700
2701         hci_dev_set_flag(hdev, HCI_SETUP);
2702         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2703
2704         if (hdev->dev_type == HCI_PRIMARY) {
2705                 /* Assume BR/EDR support until proven otherwise (such as
2706                  * through reading supported features during init.
2707                  */
2708                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2709         }
2710
2711         write_lock(&hci_dev_list_lock);
2712         list_add(&hdev->list, &hci_dev_list);
2713         write_unlock(&hci_dev_list_lock);
2714
2715         /* Devices that are marked for raw-only usage are unconfigured
2716          * and should not be included in normal operation.
2717          */
2718         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2719                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2720
2721         /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2722          * callback.
2723          */
2724         if (hdev->wakeup)
2725                 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2726
2727         hci_sock_dev_event(hdev, HCI_DEV_REG);
2728         hci_dev_hold(hdev);
2729
2730         error = hci_register_suspend_notifier(hdev);
2731         if (error)
2732                 BT_WARN("register suspend notifier failed error:%d\n", error);
2733
2734         queue_work(hdev->req_workqueue, &hdev->power_on);
2735
2736         idr_init(&hdev->adv_monitors_idr);
2737         msft_register(hdev);
2738
2739         return id;
2740
2741 err_wqueue:
2742         debugfs_remove_recursive(hdev->debugfs);
2743         destroy_workqueue(hdev->workqueue);
2744         destroy_workqueue(hdev->req_workqueue);
2745 err:
2746         ida_free(&hci_index_ida, hdev->id);
2747
2748         return error;
2749 }
2750 EXPORT_SYMBOL(hci_register_dev);
2751
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away under unregister_lock so other
	 * paths checking the flag see a consistent state.
	 */
	mutex_lock(&hdev->unregister_lock);
	hci_dev_set_flag(hdev, HCI_UNREGISTER);
	mutex_unlock(&hdev->unregister_lock);

	/* Remove from the global device list before tearing anything down. */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	/* Make sure a pending power-on work item is not still running. */
	cancel_work_sync(&hdev->power_on);

	hci_cmd_sync_clear(hdev);

	hci_unregister_suspend_notifier(hdev);

	hci_dev_do_close(hdev);

	/* Only notify mgmt about index removal when the device is not in
	 * init/setup/config state.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	/* Undo the rfkill registration done at register time, if any. */
	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);
	/* Actual cleanup is deferred until hci_release_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
2797
/* Release HCI device
 *
 * Final cleanup once the last reference is dropped: frees all lists,
 * keys, work queues and the hci_dev structure itself.  Must only run
 * after hci_unregister_dev() has removed the device from all lists.
 */
void hci_release_dev(struct hci_dev *hdev)
{
	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	/* Work queues are destroyed before the lists they could touch. */
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* All list teardown happens under the device lock. */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->reject_list);
	hci_bdaddr_list_clear(&hdev->accept_list);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_codec_list_clear(&hdev->local_codecs);
	msft_release(hdev);
	hci_dev_unlock(hdev);

	/* Return the device index and free remaining skbs and the device. */
	ida_destroy(&hdev->unset_handle_ida);
	ida_free(&hci_index_ida, hdev->id);
	kfree_skb(hdev->sent_cmd);
	kfree_skb(hdev->req_skb);
	kfree_skb(hdev->recv_event);
	kfree(hdev);
}
EXPORT_SYMBOL(hci_release_dev);
2835
2836 int hci_register_suspend_notifier(struct hci_dev *hdev)
2837 {
2838         int ret = 0;
2839
2840         if (!hdev->suspend_notifier.notifier_call &&
2841             !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2842                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2843                 ret = register_pm_notifier(&hdev->suspend_notifier);
2844         }
2845
2846         return ret;
2847 }
2848
2849 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2850 {
2851         int ret = 0;
2852
2853         if (hdev->suspend_notifier.notifier_call) {
2854                 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2855                 if (!ret)
2856                         hdev->suspend_notifier.notifier_call = NULL;
2857         }
2858
2859         return ret;
2860 }
2861
/* Cancel ongoing command synchronously:
 *
 * - Cancel command timer
 * - Reset command counter
 * - Cancel command request
 *
 * @err is forwarded to hci_cmd_sync_cancel_sync() as the reason the
 * pending request is being aborted.
 */
static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	/* Stop both the command timeout and the "number of completed
	 * commands" timers before resetting the command counter.
	 */
	cancel_delayed_work_sync(&hdev->cmd_timer);
	cancel_delayed_work_sync(&hdev->ncmd_timer);

	/* Allow one outstanding command again. */
	atomic_set(&hdev->cmd_cnt, 1);

	hci_cmd_sync_cancel_sync(hdev, err);
}
2878
/* Suspend HCI device
 *
 * Runs the suspend sequence under the request-sync lock and notifies
 * both mgmt and the HCI sockets.  Returns 0 when there is nothing to
 * do (not powered, unregistering, or powering down), otherwise the
 * result of hci_suspend_sync().
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Suspend should only act on when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to suspend */
	if (mgmt_powering_down(hdev))
		return 0;

	/* Cancel potentially blocking sync operation before suspend */
	/* NOTE(review): EHOSTDOWN is passed as a positive value here —
	 * confirm hci_cmd_sync_cancel_sync() expects a positive errno.
	 */
	hci_cancel_cmd_sync(hdev, EHOSTDOWN);

	hci_req_sync_lock(hdev);
	ret = hci_suspend_sync(hdev);
	hci_req_sync_unlock(hdev);

	/* Clear any stale wake reason and report the state entered. */
	hci_clear_wake_reason(hdev);
	mgmt_suspending(hdev, hdev->suspend_state);

	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return ret;
}
EXPORT_SYMBOL(hci_suspend_dev);
2909
/* Resume HCI device
 *
 * Mirror image of hci_suspend_dev(): runs the resume sequence under
 * the request-sync lock and reports the wake reason to mgmt and the
 * HCI sockets.  Returns 0 when there is nothing to do, otherwise the
 * result of hci_resume_sync().
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Resume should only act on when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to resume */
	if (mgmt_powering_down(hdev))
		return 0;

	hci_req_sync_lock(hdev);
	ret = hci_resume_sync(hdev);
	hci_req_sync_unlock(hdev);

	/* Report what woke the controller (recorded during suspend). */
	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
		      hdev->wake_addr_type);

	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return ret;
}
EXPORT_SYMBOL(hci_resume_dev);
2937
2938 /* Reset HCI device */
2939 int hci_reset_dev(struct hci_dev *hdev)
2940 {
2941         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2942         struct sk_buff *skb;
2943
2944         skb = bt_skb_alloc(3, GFP_ATOMIC);
2945         if (!skb)
2946                 return -ENOMEM;
2947
2948         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2949         skb_put_data(skb, hw_err, 3);
2950
2951         bt_dev_err(hdev, "Injecting HCI hardware error event");
2952
2953         /* Send Hardware Error to upper stack */
2954         return hci_recv_frame(hdev, skb);
2955 }
2956 EXPORT_SYMBOL(hci_reset_dev);
2957
/* Receive frame from HCI drivers
 *
 * Validates the packet type, tags the skb as incoming, timestamps it
 * and queues it for the RX work item.  Takes ownership of @skb: it is
 * freed on any error.  Returns 0 on success, -ENXIO if the device is
 * neither up nor initializing, -EINVAL for unknown packet types.
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	switch (hci_skb_pkt_type(skb)) {
	case HCI_EVENT_PKT:
		break;
	case HCI_ACLDATA_PKT:
		/* Detect if ISO packet has been sent as ACL */
		if (hci_conn_num(hdev, ISO_LINK)) {
			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
			__u8 type;

			/* Re-tag the skb when the handle belongs to an
			 * ISO connection.
			 */
			type = hci_conn_lookup_type(hdev, hci_handle(handle));
			if (type == ISO_LINK)
				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
		}
		break;
	case HCI_SCODATA_PKT:
		break;
	case HCI_ISODATA_PKT:
		break;
	default:
		/* Reject anything that is not a known HCI packet type. */
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Defer actual processing to the RX work item. */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
3002
3003 /* Receive diagnostic message from HCI drivers */
3004 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3005 {
3006         /* Mark as diagnostic packet */
3007         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3008
3009         /* Time stamp */
3010         __net_timestamp(skb);
3011
3012         skb_queue_tail(&hdev->rx_q, skb);
3013         queue_work(hdev->workqueue, &hdev->rx_work);
3014
3015         return 0;
3016 }
3017 EXPORT_SYMBOL(hci_recv_diag);
3018
3019 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3020 {
3021         va_list vargs;
3022
3023         va_start(vargs, fmt);
3024         kfree_const(hdev->hw_info);
3025         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3026         va_end(vargs);
3027 }
3028 EXPORT_SYMBOL(hci_set_hw_info);
3029
3030 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3031 {
3032         va_list vargs;
3033
3034         va_start(vargs, fmt);
3035         kfree_const(hdev->fw_info);
3036         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3037         va_end(vargs);
3038 }
3039 EXPORT_SYMBOL(hci_set_fw_info);
3040
3041 /* ---- Interface to upper protocols ---- */
3042
3043 int hci_register_cb(struct hci_cb *cb)
3044 {
3045         BT_DBG("%p name %s", cb, cb->name);
3046
3047         mutex_lock(&hci_cb_list_lock);
3048         list_add_tail(&cb->list, &hci_cb_list);
3049         mutex_unlock(&hci_cb_list_lock);
3050
3051         return 0;
3052 }
3053 EXPORT_SYMBOL(hci_register_cb);
3054
3055 int hci_unregister_cb(struct hci_cb *cb)
3056 {
3057         BT_DBG("%p name %s", cb, cb->name);
3058
3059         mutex_lock(&hci_cb_list_lock);
3060         list_del(&cb->list);
3061         mutex_unlock(&hci_cb_list_lock);
3062
3063         return 0;
3064 }
3065 EXPORT_SYMBOL(hci_unregister_cb);
3066
/* Hand one outgoing frame to the driver.
 *
 * Timestamps the skb, mirrors it to the monitor (and, in promiscuous
 * mode, to the sockets), then passes it to hdev->send().  Takes
 * ownership of @skb: it is freed on any error.  Returns 0 on success,
 * -EINVAL if the device is not running, or the driver's error code.
 */
static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* The running check happens after the monitor copy so the frame
	 * is still visible in traces even when the send is refused.
	 */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
		return err;
	}

	return 0;
}
3102
/* Send HCI command
 *
 * Builds a command skb for @opcode with @plen bytes of @param and
 * queues it on the command queue for the command work item.  Returns
 * 0 on success or -ENOMEM if the skb could not be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	/* Actual transmission happens in hci_cmd_work. */
	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3127
/* Send a command directly to the driver, bypassing the command queue.
 *
 * Only vendor-specific commands (OGF 0x3f) are allowed here, since
 * those are the only ones a controller may legitimately leave
 * unresponded.  Returns 0 on success, -EINVAL for non-vendor opcodes,
 * -ENOMEM on allocation failure.
 */
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	/* NOTE(review): the hci_send_frame() result is ignored, so
	 * transmission errors are not propagated to the caller.
	 */
	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
3158
3159 /* Get data from the previously sent command */
3160 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3161 {
3162         struct hci_command_hdr *hdr;
3163
3164         if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3165                 return NULL;
3166
3167         hdr = (void *)skb->data;
3168
3169         if (hdr->opcode != cpu_to_le16(opcode))
3170                 return NULL;
3171
3172         return skb->data + HCI_COMMAND_HDR_SIZE;
3173 }
3174
3175 /* Get data from the previously sent command */
3176 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3177 {
3178         void *data;
3179
3180         /* Check if opcode matches last sent command */
3181         data = hci_cmd_data(hdev->sent_cmd, opcode);
3182         if (!data)
3183                 /* Check if opcode matches last request */
3184                 data = hci_cmd_data(hdev->req_skb, opcode);
3185
3186         return data;
3187 }
3188
/* Get data from last received event
 *
 * Returns a pointer to the parameters of the last received event if
 * its event code (or, for LE meta events, its subevent code) matches
 * @event, otherwise NULL.
 */
void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
{
	struct hci_event_hdr *hdr;
	int offset;

	if (!hdev->recv_event)
		return NULL;

	hdr = (void *)hdev->recv_event->data;
	offset = sizeof(*hdr);

	if (hdr->evt != event) {
		/* In case of LE metaevent check the subevent match */
		if (hdr->evt == HCI_EV_LE_META) {
			struct hci_ev_le_meta *ev;

			/* On a subevent match, the returned pointer skips
			 * both the event header and the LE meta header.
			 */
			ev = (void *)hdev->recv_event->data + offset;
			offset += sizeof(*ev);
			if (ev->subevent == event)
				goto found;
		}
		return NULL;
	}

found:
	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	return hdev->recv_event->data + offset;
}
3219
3220 /* Send ACL data */
3221 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3222 {
3223         struct hci_acl_hdr *hdr;
3224         int len = skb->len;
3225
3226         skb_push(skb, HCI_ACL_HDR_SIZE);
3227         skb_reset_transport_header(skb);
3228         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3229         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3230         hdr->dlen   = cpu_to_le16(len);
3231 }
3232
/* Tag, header and enqueue an (optionally fragmented) ACL skb.
 *
 * The head skb gets the caller's @flags; any frag_list fragments are
 * re-flagged as continuations (ACL_CONT) and queued atomically with
 * the head so the TX scheduler never interleaves another frame.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments live on
	 * frag_list and are queued individually below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	/* AMP controllers use the channel handle, primary controllers
	 * the connection handle.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* All fragments after the head are continuations. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3294
3295 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3296 {
3297         struct hci_dev *hdev = chan->conn->hdev;
3298
3299         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3300
3301         hci_queue_acl(chan, &chan->data_q, skb, flags);
3302
3303         queue_work(hdev->workqueue, &hdev->tx_work);
3304 }
3305
/* Send SCO data
 *
 * Prepends a SCO header with the connection handle and payload
 * length, queues the skb on the connection's data queue and kicks the
 * TX work item.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Header is built before the push so it records the payload
	 * length only.
	 */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
3326
3327 /* Send ISO data */
3328 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3329 {
3330         struct hci_iso_hdr *hdr;
3331         int len = skb->len;
3332
3333         skb_push(skb, HCI_ISO_HDR_SIZE);
3334         skb_reset_transport_header(skb);
3335         hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3336         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3337         hdr->dlen   = cpu_to_le16(len);
3338 }
3339
/* Tag, header and enqueue an (optionally fragmented) ISO skb.
 *
 * A lone skb is flagged ISO_SINGLE; a fragmented one gets ISO_START
 * on the head, ISO_CONT on intermediate fragments and ISO_END on the
 * last.
 */
static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;
	__u16 flags;

	/* Restrict the head skb to its linear part; fragments live on
	 * frag_list and are queued individually below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;

	list = skb_shinfo(skb)->frag_list;

	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
	hci_add_iso_hdr(skb, conn->handle, flags);

	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		__skb_queue_tail(queue, skb);

		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
			/* Last fragment gets ISO_END, others ISO_CONT. */
			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
						   0x00);
			hci_add_iso_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);
	}
}
3384
3385 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3386 {
3387         struct hci_dev *hdev = conn->hdev;
3388
3389         BT_DBG("%s len %d", hdev->name, skb->len);
3390
3391         hci_queue_iso(conn, &conn->data_q, skb);
3392
3393         queue_work(hdev->workqueue, &hdev->tx_work);
3394 }
3395
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */

/* Compute how many packets @conn may send, given @num candidate
 * connections sharing the available controller buffers.  The quota is
 * the per-link-type buffer count divided by @num, but at least 1 so
 * every connection makes progress.  With a NULL @conn the quota is 0.
 */
static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
{
	struct hci_dev *hdev;
	int cnt, q;

	if (!conn) {
		*quote = 0;
		return;
	}

	hdev = conn->hdev;

	/* Pick the buffer pool matching the link type; LE and ISO fall
	 * back to the ACL pool when they have no dedicated buffers
	 * (MTU of 0).
	 */
	switch (conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	case ISO_LINK:
		cnt = hdev->iso_mtu ? hdev->iso_cnt :
			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
}
3437
/* Select the connection of @type with queued data and the fewest
 * in-flight packets, and compute its TX quota.  Returns NULL (with
 * *quote set to 0 by hci_quote_sent) when no eligible connection
 * exists.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Skip connections of other types or with nothing queued. */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins, for fairness. */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* Stop once every connection of this type was visited. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	hci_quote_sent(conn, num, quote);

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3475
3476 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3477 {
3478         struct hci_conn_hash *h = &hdev->conn_hash;
3479         struct hci_conn *c;
3480
3481         bt_dev_err(hdev, "link tx timeout");
3482
3483         rcu_read_lock();
3484
3485         /* Kill stalled connections */
3486         list_for_each_entry_rcu(c, &h->list, list) {
3487                 if (c->type == type && c->sent) {
3488                         bt_dev_err(hdev, "killing stalled connection %pMR",
3489                                    &c->dst);
3490                         /* hci_disconnect might sleep, so, we have to release
3491                          * the RCU read lock before calling it.
3492                          */
3493                         rcu_read_unlock();
3494                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3495                         rcu_read_lock();
3496                 }
3497         }
3498
3499         rcu_read_unlock();
3500 }
3501
/* Select the channel to transmit on for link @type and compute its
 * quota.
 *
 * Scans all channels of all eligible connections, keeping only those
 * whose head skb has the highest priority seen so far; among those,
 * the channel whose connection has the fewest in-flight packets wins.
 * Returns NULL when nothing is queued.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Priority of a channel is that of its head skb. */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Least-sent connection wins within a priority. */
			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		/* Stop once every connection of this type was visited. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	hci_quote_sent(chan->conn, num, quote);

	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3564
/* Anti-starvation pass for link @type.
 *
 * For every channel that sent nothing in the last scheduling round
 * (chan->sent == 0) but still has queued data, promote its head skb
 * to HCI_PRIO_MAX - 1 so it competes with high-priority traffic next
 * round.  Channels that did send are just reset for the next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel made progress: clear counter, no boost. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop once every connection of this type was visited. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3614
/* Number of controller data blocks consumed by @skb, excluding the
 * ACL header, rounded up to whole blocks of hdev->block_len bytes.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
3620
3621 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3622 {
3623         unsigned long last_tx;
3624
3625         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3626                 return;
3627
3628         switch (type) {
3629         case LE_LINK:
3630                 last_tx = hdev->le_last_tx;
3631                 break;
3632         default:
3633                 last_tx = hdev->acl_last_tx;
3634                 break;
3635         }
3636
3637         /* tx timeout must be longer than maximum link supervision timeout
3638          * (40.9 seconds)
3639          */
3640         if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3641                 hci_link_tx_to(hdev, type);
3642 }
3643
/* Schedule SCO */

/* Drain queued SCO frames while controller SCO buffers are available,
 * round-robining over connections via hci_low_sent().
 */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the per-connection sent counter at ~0. */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3667
/* Drain queued eSCO frames; identical to hci_sched_sco() but for
 * ESCO_LINK connections (both share the sco_cnt buffer pool).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the per-connection sent counter at ~0. */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3691
/* Packet-based ACL scheduler.
 *
 * Sends queued ACL frames while controller ACL buffers remain,
 * honoring per-channel priority (stops a channel's burst when a
 * lower-priority skb reaches its head).  Interleaves SCO/eSCO sends
 * after every ACL frame and runs the anti-starvation pass when
 * anything was transmitted.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt, ACL_LINK);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peek-then-dequeue: the skb is only removed once
			 * it passed the priority check above.
			 */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Only recalculate priorities if something was actually sent. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3733
3734 static void hci_sched_acl_blk(struct hci_dev *hdev)
3735 {
3736         unsigned int cnt = hdev->block_cnt;
3737         struct hci_chan *chan;
3738         struct sk_buff *skb;
3739         int quote;
3740         u8 type;
3741
3742         BT_DBG("%s", hdev->name);
3743
3744         if (hdev->dev_type == HCI_AMP)
3745                 type = AMP_LINK;
3746         else
3747                 type = ACL_LINK;
3748
3749         __check_timeout(hdev, cnt, type);
3750
3751         while (hdev->block_cnt > 0 &&
3752                (chan = hci_chan_sent(hdev, type, &quote))) {
3753                 u32 priority = (skb_peek(&chan->data_q))->priority;
3754                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3755                         int blocks;
3756
3757                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3758                                skb->len, skb->priority);
3759
3760                         /* Stop if priority has changed */
3761                         if (skb->priority < priority)
3762                                 break;
3763
3764                         skb = skb_dequeue(&chan->data_q);
3765
3766                         blocks = __get_blocks(hdev, skb);
3767                         if (blocks > hdev->block_cnt)
3768                                 return;
3769
3770                         hci_conn_enter_active_mode(chan->conn,
3771                                                    bt_cb(skb)->force_active);
3772
3773                         hci_send_frame(hdev, skb);
3774                         hdev->acl_last_tx = jiffies;
3775
3776                         hdev->block_cnt -= blocks;
3777                         quote -= blocks;
3778
3779                         chan->sent += blocks;
3780                         chan->conn->sent += blocks;
3781                 }
3782         }
3783
3784         if (cnt != hdev->block_cnt)
3785                 hci_prio_recalculate(hdev, type);
3786 }
3787
3788 static void hci_sched_acl(struct hci_dev *hdev)
3789 {
3790         BT_DBG("%s", hdev->name);
3791
3792         /* No ACL link over BR/EDR controller */
3793         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3794                 return;
3795
3796         /* No AMP link over AMP controller */
3797         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3798                 return;
3799
3800         switch (hdev->flow_ctl_mode) {
3801         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3802                 hci_sched_acl_pkt(hdev);
3803                 break;
3804
3805         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3806                 hci_sched_acl_blk(hdev);
3807                 break;
3808         }
3809 }
3810
3811 static void hci_sched_le(struct hci_dev *hdev)
3812 {
3813         struct hci_chan *chan;
3814         struct sk_buff *skb;
3815         int quote, cnt, tmp;
3816
3817         BT_DBG("%s", hdev->name);
3818
3819         if (!hci_conn_num(hdev, LE_LINK))
3820                 return;
3821
3822         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3823
3824         __check_timeout(hdev, cnt, LE_LINK);
3825
3826         tmp = cnt;
3827         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3828                 u32 priority = (skb_peek(&chan->data_q))->priority;
3829                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3830                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3831                                skb->len, skb->priority);
3832
3833                         /* Stop if priority has changed */
3834                         if (skb->priority < priority)
3835                                 break;
3836
3837                         skb = skb_dequeue(&chan->data_q);
3838
3839                         hci_send_frame(hdev, skb);
3840                         hdev->le_last_tx = jiffies;
3841
3842                         cnt--;
3843                         chan->sent++;
3844                         chan->conn->sent++;
3845
3846                         /* Send pending SCO packets right away */
3847                         hci_sched_sco(hdev);
3848                         hci_sched_esco(hdev);
3849                 }
3850         }
3851
3852         if (hdev->le_pkts)
3853                 hdev->le_cnt = cnt;
3854         else
3855                 hdev->acl_cnt = cnt;
3856
3857         if (cnt != tmp)
3858                 hci_prio_recalculate(hdev, LE_LINK);
3859 }
3860
3861 /* Schedule CIS */
3862 static void hci_sched_iso(struct hci_dev *hdev)
3863 {
3864         struct hci_conn *conn;
3865         struct sk_buff *skb;
3866         int quote, *cnt;
3867
3868         BT_DBG("%s", hdev->name);
3869
3870         if (!hci_conn_num(hdev, ISO_LINK))
3871                 return;
3872
3873         cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3874                 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3875         while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3876                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3877                         BT_DBG("skb %p len %d", skb, skb->len);
3878                         hci_send_frame(hdev, skb);
3879
3880                         conn->sent++;
3881                         if (conn->sent == ~0)
3882                                 conn->sent = 0;
3883                         (*cnt)--;
3884                 }
3885         }
3886 }
3887
3888 static void hci_tx_work(struct work_struct *work)
3889 {
3890         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3891         struct sk_buff *skb;
3892
3893         BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3894                hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3895
3896         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3897                 /* Schedule queues and send stuff to HCI driver */
3898                 hci_sched_sco(hdev);
3899                 hci_sched_esco(hdev);
3900                 hci_sched_iso(hdev);
3901                 hci_sched_acl(hdev);
3902                 hci_sched_le(hdev);
3903         }
3904
3905         /* Send next queued raw (unknown type) packet */
3906         while ((skb = skb_dequeue(&hdev->raw_q)))
3907                 hci_send_frame(hdev, skb);
3908 }
3909
3910 /* ----- HCI RX task (incoming data processing) ----- */
3911
3912 /* ACL data packet */
3913 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3914 {
3915         struct hci_acl_hdr *hdr = (void *) skb->data;
3916         struct hci_conn *conn;
3917         __u16 handle, flags;
3918
3919         skb_pull(skb, HCI_ACL_HDR_SIZE);
3920
3921         handle = __le16_to_cpu(hdr->handle);
3922         flags  = hci_flags(handle);
3923         handle = hci_handle(handle);
3924
3925         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3926                handle, flags);
3927
3928         hdev->stat.acl_rx++;
3929
3930         hci_dev_lock(hdev);
3931         conn = hci_conn_hash_lookup_handle(hdev, handle);
3932         hci_dev_unlock(hdev);
3933
3934         if (conn) {
3935                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3936
3937                 /* Send to upper protocol */
3938                 l2cap_recv_acldata(conn, skb, flags);
3939                 return;
3940         } else {
3941                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3942                            handle);
3943         }
3944
3945         kfree_skb(skb);
3946 }
3947
3948 /* SCO data packet */
3949 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3950 {
3951         struct hci_sco_hdr *hdr = (void *) skb->data;
3952         struct hci_conn *conn;
3953         __u16 handle, flags;
3954
3955         skb_pull(skb, HCI_SCO_HDR_SIZE);
3956
3957         handle = __le16_to_cpu(hdr->handle);
3958         flags  = hci_flags(handle);
3959         handle = hci_handle(handle);
3960
3961         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3962                handle, flags);
3963
3964         hdev->stat.sco_rx++;
3965
3966         hci_dev_lock(hdev);
3967         conn = hci_conn_hash_lookup_handle(hdev, handle);
3968         hci_dev_unlock(hdev);
3969
3970         if (conn) {
3971                 /* Send to upper protocol */
3972                 hci_skb_pkt_status(skb) = flags & 0x03;
3973                 sco_recv_scodata(conn, skb);
3974                 return;
3975         } else {
3976                 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3977                                        handle);
3978         }
3979
3980         kfree_skb(skb);
3981 }
3982
3983 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3984 {
3985         struct hci_iso_hdr *hdr;
3986         struct hci_conn *conn;
3987         __u16 handle, flags;
3988
3989         hdr = skb_pull_data(skb, sizeof(*hdr));
3990         if (!hdr) {
3991                 bt_dev_err(hdev, "ISO packet too small");
3992                 goto drop;
3993         }
3994
3995         handle = __le16_to_cpu(hdr->handle);
3996         flags  = hci_flags(handle);
3997         handle = hci_handle(handle);
3998
3999         bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
4000                    handle, flags);
4001
4002         hci_dev_lock(hdev);
4003         conn = hci_conn_hash_lookup_handle(hdev, handle);
4004         hci_dev_unlock(hdev);
4005
4006         if (!conn) {
4007                 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
4008                            handle);
4009                 goto drop;
4010         }
4011
4012         /* Send to upper protocol */
4013         iso_recv(conn, skb, flags);
4014         return;
4015
4016 drop:
4017         kfree_skb(skb);
4018 }
4019
4020 static bool hci_req_is_complete(struct hci_dev *hdev)
4021 {
4022         struct sk_buff *skb;
4023
4024         skb = skb_peek(&hdev->cmd_q);
4025         if (!skb)
4026                 return true;
4027
4028         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4029 }
4030
4031 static void hci_resend_last(struct hci_dev *hdev)
4032 {
4033         struct hci_command_hdr *sent;
4034         struct sk_buff *skb;
4035         u16 opcode;
4036
4037         if (!hdev->sent_cmd)
4038                 return;
4039
4040         sent = (void *) hdev->sent_cmd->data;
4041         opcode = __le16_to_cpu(sent->opcode);
4042         if (opcode == HCI_OP_RESET)
4043                 return;
4044
4045         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4046         if (!skb)
4047                 return;
4048
4049         skb_queue_head(&hdev->cmd_q, skb);
4050         queue_work(hdev->workqueue, &hdev->cmd_work);
4051 }
4052
/* Relate a command complete/status event (opcode, status) to the
 * currently pending request and, when the request just finished, return
 * its completion callback through @req_complete or @req_complete_skb.
 * On a failed command, flushes the remaining commands of the same
 * request from hdev->cmd_q.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	skb = hdev->req_skb;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->req_skb instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		return;
	}

	if (skb && bt_cb(skb)->hci.req_complete) {
		*req_complete = bt_cb(skb)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* An HCI_REQ_START command begins the next request: put it
		 * back and stop flushing.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		/* Queue lock is held with IRQs saved: use the irq-safe free */
		dev_kfree_skb_irq(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4119
/* RX worker: drain hdev->rx_q and dispatch every packet to the handler
 * matching its HCI packet type (event, ACL, SCO or ISO).
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions used for collecting packet parsing
	 * coverage information from this background thread and associate
	 * the coverage with the syscall's thread which originally injected
	 * the packet. This helps fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * the userspace has exclusive access to device.
		 * When device is HCI_INIT, we still need to process
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
4194
/* Send one HCI command skb to the driver, keeping a clone in
 * hdev->sent_cmd so the eventual command complete/status event can be
 * matched against it.  Consumes one command credit (hdev->cmd_cnt)
 * when the frame was handed to the driver.
 */
static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	bt_dev_dbg(hdev, "skb %p", skb);

	/* Drop the previously tracked command, if any */
	kfree_skb(hdev->sent_cmd);

	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
	if (!hdev->sent_cmd) {
		/* Clone failed: requeue the command and retry later */
		skb_queue_head(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
		return;
	}

	err = hci_send_frame(hdev, skb);
	if (err < 0) {
		/* Driver rejected the frame: abort the pending sync request
		 * (err is negative, so pass the positive error code).
		 */
		hci_cmd_sync_cancel_sync(hdev, -err);
		return;
	}

	/* If a synchronous request is waiting on this command's status,
	 * keep a clone in hdev->req_skb for the completion path.
	 */
	if (hci_req_status_pend(hdev) &&
	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
		kfree_skb(hdev->req_skb);
		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	}

	/* One controller command slot is now in use */
	atomic_dec(&hdev->cmd_cnt);
}
4224
4225 static void hci_cmd_work(struct work_struct *work)
4226 {
4227         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4228         struct sk_buff *skb;
4229
4230         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4231                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4232
4233         /* Send queued commands */
4234         if (atomic_read(&hdev->cmd_cnt)) {
4235                 skb = skb_dequeue(&hdev->cmd_q);
4236                 if (!skb)
4237                         return;
4238
4239                 hci_send_cmd_sync(hdev, skb);
4240
4241                 rcu_read_lock();
4242                 if (test_bit(HCI_RESET, &hdev->flags) ||
4243                     hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4244                         cancel_delayed_work(&hdev->cmd_timer);
4245                 else
4246                         queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4247                                            HCI_CMD_TIMEOUT);
4248                 rcu_read_unlock();
4249         }
4250 }