Linux 6.10-rc3
[linux-block.git] / net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
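/* One-shot request builders for the legacy ioctl paths below. Each
 * queues a single HCI command on the request and is executed
 * synchronously through hci_req_sync().
 */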
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
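
/* Callers must balance the hold taken by hci_dev_get() with a matching
 * hci_dev_put() once they are done with the device, as the ioctl
 * helpers below do (sketch):
 *
 *	struct hci_dev *hdev = hci_dev_get(index);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */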

/* ---- Inquiry support ---- */

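/* The discovery cache keeps every inquiry result on the "all" list,
 * entries whose remote name is not yet known on the "unknown" list and
 * entries with a pending name request on the "resolve" list.
 */
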
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

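/* Advance the discovery state machine and emit the corresponding mgmt
 * Discovering events when a transition actually starts or stops a
 * discovery procedure.
 */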
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		/* If discovery was not started then it was initiated by the
		 * MGMT interface so no MGMT event shall be generated either
		 */
		if (old_state != DISCOVERY_STARTING) {
			hdev->discovery.state = old_state;
			return;
		}
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

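/* Add a fresh inquiry result to the cache or refresh an existing entry.
 * The returned MGMT flags tell the caller whether the device uses
 * legacy pairing and whether its name still needs to be confirmed.
 */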
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

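/* Handler for the HCIINQUIRY ioctl: run a new inquiry if the cache is
 * stale or a flush was requested, then copy the cached results back to
 * user space.
 */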
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	ret = hci_dev_open_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_dev_close_sync(hdev);

	hci_req_sync_unlock(hdev);

	return err;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Cancel these to avoid queueing non-chained pending work */
	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
	/* Wait for
	 *
	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
	 *
	 * inside RCU section to see the flag or complete scheduling.
	 */
	synchronize_rcu();
	/* Explicitly cancel works in case scheduled after setting the flag. */
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
	hdev->iso_cnt = 0;

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

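/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync with a
 * scan mode set through the legacy HCISETSCAN ioctl and let mgmt know
 * about the resulting settings change.
 */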
static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strscpy(di.name, hdev->name, sizeof(di.name));
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_dev_do_poweroff(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_set_powered_sync(hdev, false);

	hci_req_sync_unlock(hdev);

	return err;
}

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;
	int err;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
		return 0;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);

		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
			err = hci_dev_do_poweroff(hdev);
			if (err) {
				bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
					   err);

				/* Make sure the device is still closed even if
				 * anything during power off sequence (eg.
				 * disconnecting devices) failed.
				 */
				hci_dev_do_close(hdev);
			}
		}
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

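/* Deferred power-on work item. Opens the device and then re-checks the
 * error conditions (rfkill, unconfigured controller, missing address)
 * that are deliberately ignored while the setup phase is in progress.
 */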
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	hci_dev_hold(hdev);
	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (!hci_dev_do_close(hdev))
		hci_dev_do_open(hdev);

	hci_dev_put(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key, *tmp;

	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b, *tmp;

	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}

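/* Key lookup helpers. These walk their RCU-protected lists and filter
 * out any key blocked via hci_is_blocked_key(), so callers never see
 * revoked key material.
 */
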
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

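/* HCI command timer function. Fires when the controller has not
 * answered the most recently sent command: the pending sync command,
 * if any, is cancelled with ETIMEDOUT, the driver gets a chance to
 * recover via its cmd_timeout callback and command processing is
 * restarted.
 */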
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->req_skb) {
		u16 opcode = hci_skb_opcode(hdev->req_skb);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);

		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During HCI_INIT phase no events can be injected if the ncmd timer
	 * triggers since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* This is an irrecoverable state, inject hardware error event */
	hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

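/* Store remote OOB pairing data for a peer. data->present encodes
 * which values are valid: 0x01 for P-192 only, 0x02 for P-256 only and
 * 0x03 for both, matching the assignments below.
 */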
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

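/* Advertising instance bookkeeping. Instances live on
 * hdev->adv_instances and, unless stated otherwise, the helpers below
 * must be called with hdev->lock held.
 */
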
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %dMR", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u16 adv_data_len, u8 *adv_data,
				      u16 scan_rsp_len, u8 *scan_rsp_data,
				      u16 timeout, u16 duration, s8 tx_power,
				      u32 min_interval, u32 max_interval,
				      u8 mesh_handle)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);
	if (adv) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
	} else {
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
			return ERR_PTR(-EOVERFLOW);

		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
		if (!adv)
			return ERR_PTR(-ENOMEM);

		adv->pending = true;
		adv->instance = instance;

		/* If the controller supports only one set and the instance
		 * is set to 1 then there is no option other than using
		 * handle 0x00.
		 */
		if (hdev->le_num_of_adv_sets == 1 && instance == 1)
			adv->handle = 0x00;
		else
			adv->handle = instance;

		list_add(&adv->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv->flags = flags;
	adv->min_interval = min_interval;
	adv->max_interval = max_interval;
	adv->tx_power = tx_power;
	/* Defining a mesh_handle changes the timing units to ms,
	 * rather than seconds, and ties the instance to the requested
	 * mesh_tx queue.
	 */
	adv->mesh = mesh_handle;

	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
				  scan_rsp_len, scan_rsp_data);

	adv->timeout = timeout;
	adv->remaining_time = timeout;

	if (duration == 0)
		adv->duration = hdev->def_multi_adv_rotation_duration;
	else
		adv->duration = duration;

	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);

	BT_DBG("%s for %dMR", hdev->name, instance);

	return adv;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u8 data_len, u8 *data,
				      u32 min_interval, u32 max_interval)
{
	struct adv_info *adv;

	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
				   min_interval, max_interval, 0);
	if (IS_ERR(adv))
		return adv;

	adv->periodic = true;
	adv->per_adv_data_len = data_len;

	if (data)
		memcpy(adv->per_adv_data, data, data_len);

	return adv;
}

/* This function requires the caller holds hdev->lock */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);

	/* If advertisement doesn't exist, we can't modify its data */
	if (!adv)
		return -ENOENT;

	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memcpy(adv->adv_data, adv_data, adv_data_len);
		adv->adv_data_len = adv_data_len;
		adv->adv_data_changed = true;
	}

	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
		adv->scan_rsp_len = scan_rsp_len;
		adv->scan_rsp_changed = true;
	}

	/* Mark as changed if there are flags which would affect it */
	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		adv->scan_rsp_changed = true;

	return 0;
}

01ce70b0
LAD
1866/* This function requires the caller holds hdev->lock */
1867u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1868{
1869 u32 flags;
1870 struct adv_info *adv;
1871
1872 if (instance == 0x00) {
1873 /* Instance 0 always manages the "Tx Power" and "Flags"
1874 * fields
1875 */
1876 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1877
1878 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1879 * corresponds to the "connectable" instance flag.
1880 */
1881 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1882 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1883
1884 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1885 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1886 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1887 flags |= MGMT_ADV_FLAG_DISCOV;
1888
1889 return flags;
1890 }
1891
1892 adv = hci_find_adv_instance(hdev, instance);
1893
1894 /* Return 0 when we got an invalid instance identifier. */
1895 if (!adv)
1896 return 0;
1897
1898 return adv->flags;
1899}

bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return true;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return false;

	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return true;

	return adv->scan_rsp_len ? true : false;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int handle;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		hci_free_adv_monitor(hdev, monitor);

	idr_destroy(&hdev->adv_monitors_idr);
}

/* Free the monitor structure and do some bookkeeping.
 * This function requires the caller holds hdev->lock.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	struct adv_pattern *pattern;
	struct adv_pattern *tmp;

	if (!monitor)
		return;

	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);
		kfree(pattern);
	}

	if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);

	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
		hdev->adv_monitors_cnt--;
		mgmt_adv_monitor_removed(hdev, monitor->handle);
	}

	kfree(monitor);
}

/* Assigns a handle to the monitor, and if offloading is supported and
 * power is on, also attempts to forward the request to the controller.
 * This function requires the caller holds hci_req_sync_lock.
 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	int min, max, handle;
	int status = 0;

	if (!monitor)
		return -EINVAL;

	hci_dev_lock(hdev);

	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);

	hci_dev_unlock(hdev);

	if (handle < 0)
		return handle;

	monitor->handle = handle;

	if (!hdev_is_powered(hdev))
		return status;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		bt_dev_dbg(hdev, "add monitor %d status %d",
			   monitor->handle, status);
		/* Message was not forwarded to controller - not an error */
		break;

	case HCI_ADV_MONITOR_EXT_MSFT:
		status = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "add monitor %d msft status %d",
			   handle, status);
		break;
	}

	return status;
}
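
/* Lifecycle sketch (illustrative only; the local variable names are
 * assumptions): the caller allocates the monitor, populates its pattern
 * list, and hands ownership over under hci_req_sync_lock.
 *
 *	struct adv_monitor *m = kzalloc(sizeof(*m), GFP_KERNEL);
 *
 *	if (!m)
 *		return -ENOMEM;
 *	INIT_LIST_HEAD(&m->patterns);
 *	(append struct adv_pattern entries to m->patterns here)
 *	hci_req_sync_lock(hdev);
 *	err = hci_add_adv_monitor(hdev, m);
 *	hci_req_sync_unlock(hdev);
 */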

/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * This function requires the caller holds hci_req_sync_lock.
 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
				  struct adv_monitor *monitor)
{
	int status = 0;
	int handle;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		bt_dev_dbg(hdev, "remove monitor %d status %d",
			   monitor->handle, status);
		goto free_monitor;

	case HCI_ADV_MONITOR_EXT_MSFT:
		handle = monitor->handle;
		status = msft_remove_monitor(hdev, monitor);
		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
			   handle, status);
		break;
	}

	/* In case no matching handle registered, just free the monitor */
	if (status == -ENOENT)
		goto free_monitor;

	return status;

free_monitor:
	if (status == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	return status;
}

/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
{
	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);

	if (!monitor)
		return -EINVAL;

	return hci_remove_adv_monitor(hdev, monitor);
}

/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_all_adv_monitor(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int idr_next_id = 0;
	int status = 0;

	while (1) {
		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
		if (!monitor)
			break;

		status = hci_remove_adv_monitor(hdev, monitor);
		if (status)
			return status;

		idr_next_id++;
	}

	return status;
}

/* This function requires the caller holds hdev->lock */
bool hci_is_adv_monitoring(struct hci_dev *hdev)
{
	return !idr_is_empty(&hdev->adv_monitors_idr);
}

int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
{
	if (msft_monitor_supported(hdev))
		return HCI_ADV_MONITOR_EXT_MSFT;

	return HCI_ADV_MONITOR_EXT_NONE;
}

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
				u8 type)
{
	struct bdaddr_list_with_irk *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
				  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list_with_flags *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct bdaddr_list *b, *n;

	list_for_each_entry_safe(b, n, bdaddr_list, list) {
		list_del(&b->list);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type, u8 *peer_irk, u8 *local_irk)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	if (peer_irk)
		memcpy(entry->peer_irk, peer_irk, 16);

	if (local_irk)
		memcpy(entry->local_irk, local_irk, 16);

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type, u32 flags)
{
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;
	entry->flags = flags;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type)
{
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
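
/* Usage sketch (illustrative only; the address value is an assumption):
 * entries are keyed by address plus address type, and BDADDR_ANY acts as
 * a wildcard that clears the whole list on deletion. bdaddr_t stores the
 * address bytes in little-endian order.
 *
 *	bdaddr_t peer = {{ 0x55, 0x44, 0x33, 0x22, 0x11, 0x00 }};
 *
 *	err = hci_bdaddr_list_add(&hdev->accept_list, &peer, BDADDR_BREDR);
 *	if (!err && hci_bdaddr_list_lookup(&hdev->accept_list, &peer,
 *					   BDADDR_BREDR))
 *		err = hci_bdaddr_list_del(&hdev->accept_list, &peer,
 *					  BDADDR_BREDR);
 */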

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock or rcu_read_lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	rcu_read_lock();

	list_for_each_entry_rcu(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type) {
			rcu_read_unlock();
			return param;
		}
	}

	rcu_read_unlock();

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_del_init(struct hci_conn_params *param)
{
	if (list_empty(&param->action))
		return;

	list_del_rcu(&param->action);
	synchronize_rcu();
	INIT_LIST_HEAD(&param->action);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_add(struct hci_conn_params *param,
			  struct list_head *list)
{
	list_add_rcu(&param->action, list);
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		bt_dev_err(hdev, "out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
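
/* Usage sketch (illustrative only; the chosen policy value is an
 * assumption): callers typically create-or-get the params entry under
 * hdev->lock and then adjust the auto-connect policy.
 *
 *	struct hci_conn_params *params;
 *
 *	hci_dev_lock(hdev);
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (params)
 *		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *	hci_dev_unlock(hdev);
 */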

void hci_conn_params_free(struct hci_conn_params *params)
{
	hci_pend_le_list_del_init(params);

	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_passive_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish a one-time connection to a
		 * disabled device, keep the params but mark them as
		 * explicit-connect only.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		hci_conn_params_free(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is an LE-only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

static void hci_clear_wake_reason(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);

	hdev->wake_reason = 0;
	bacpy(&hdev->wake_addr, BDADDR_ANY);
	hdev->wake_addr_type = 0;

	hci_dev_unlock(hdev);
}

static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	struct hci_dev *hdev =
		container_of(nb, struct hci_dev, suspend_notifier);
	int ret = 0;

	/* Userspace has full control of this device. Do nothing. */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return NOTIFY_DONE;

	/* To avoid a potential race with hci_unregister_dev. */
	hci_dev_hold(hdev);

	if (action == PM_SUSPEND_PREPARE)
		ret = hci_suspend_dev(hdev);
	else if (action == PM_POST_SUSPEND)
		ret = hci_resume_dev(hdev);

	if (ret)
		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
			   action, ret);

	hci_dev_put(hdev);
	return NOTIFY_DONE;
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
{
	struct hci_dev *hdev;
	unsigned int alloc_size;

	alloc_size = sizeof(*hdev);
	if (sizeof_priv) {
		/* Fixme: May need ALIGN-ment? */
		alloc_size += sizeof_priv;
	}

	hdev = kzalloc(alloc_size, GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->advmon_allowlist_duration = 300;
	hdev->advmon_no_filter_duration = 500;
	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
	hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
	hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
	hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
	hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
	hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
	hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
	hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
	hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	/* default 1.28 sec page scan */
	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
	hdev->def_page_scan_int = 0x0800;
	hdev->def_page_scan_window = 0x0012;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	ida_init(&hdev->unset_handle_ida);

	INIT_LIST_HEAD(&hdev->mesh_pending);
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->reject_list);
	INIT_LIST_HEAD(&hdev->accept_list);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_accept_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);
	INIT_LIST_HEAD(&hdev->monitored_devices);

	INIT_LIST_HEAD(&hdev->local_codecs);
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	hci_cmd_sync_init(hdev);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);

	hci_devcd_setup(hdev);
	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev_priv);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
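
/* Allocation sketch (illustrative only; struct btdrv_data is an assumed
 * driver type): a driver that needs per-device private storage allocates
 * it together with the hci_dev and retrieves it with hci_get_priv().
 *
 *	struct btdrv_data *priv;
 *	struct hci_dev *hdev;
 *
 *	hdev = hci_alloc_dev_priv(sizeof(struct btdrv_data));
 *	if (!hdev)
 *		return -ENOMEM;
 *	priv = hci_get_priv(hdev);
 */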

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
	if (id < 0)
		return id;

	error = dev_set_name(&hdev->dev, "hci%u", id);
	if (error)
		return error;

	hdev->name = dev_name(&hdev->dev);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	/* Assume BR/EDR support until proven otherwise (such as
	 * through reading supported features during init).
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
	 * callback.
	 */
	if (hdev->wakeup)
		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	error = hci_register_suspend_notifier(hdev);
	if (error)
		BT_WARN("register suspend notifier failed error:%d\n", error);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	idr_init(&hdev->adv_monitors_idr);
	msft_register(hdev);

	return id;

err_wqueue:
	debugfs_remove_recursive(hdev->debugfs);
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_free(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
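
/* Registration sketch (illustrative only; the btdrv_* callbacks are
 * assumed driver functions): a transport driver must fill in the
 * mandatory open/close/send hooks before registering.
 *
 *	hdev->bus = HCI_USB;
 *	hdev->open = btdrv_open;
 *	hdev->close = btdrv_close;
 *	hdev->send = btdrv_send_frame;
 *
 *	id = hci_register_dev(hdev);
 *	if (id < 0)
 *		hci_free_dev(hdev);
 */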

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	mutex_lock(&hdev->unregister_lock);
	hci_dev_set_flag(hdev, HCI_UNREGISTER);
	mutex_unlock(&hdev->unregister_lock);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_cmd_sync_clear(hdev);

	hci_unregister_suspend_notifier(hdev);

	hci_dev_do_close(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list
	 */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);
	/* Actual cleanup is deferred until hci_release_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Release HCI device */
void hci_release_dev(struct hci_dev *hdev)
{
	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->reject_list);
	hci_bdaddr_list_clear(&hdev->accept_list);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_codec_list_clear(&hdev->local_codecs);
	msft_release(hdev);
	hci_dev_unlock(hdev);

	ida_destroy(&hdev->unset_handle_ida);
	ida_free(&hci_index_ida, hdev->id);
	kfree_skb(hdev->sent_cmd);
	kfree_skb(hdev->req_skb);
	kfree_skb(hdev->recv_event);
	kfree(hdev);
}
EXPORT_SYMBOL(hci_release_dev);

int hci_register_suspend_notifier(struct hci_dev *hdev)
{
	int ret = 0;

	if (!hdev->suspend_notifier.notifier_call &&
	    !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
		ret = register_pm_notifier(&hdev->suspend_notifier);
	}

	return ret;
}

int hci_unregister_suspend_notifier(struct hci_dev *hdev)
{
	int ret = 0;

	if (hdev->suspend_notifier.notifier_call) {
		ret = unregister_pm_notifier(&hdev->suspend_notifier);
		if (!ret)
			hdev->suspend_notifier.notifier_call = NULL;
	}

	return ret;
}

/* Cancel ongoing command synchronously:
 *
 * - Cancel command timer
 * - Reset command counter
 * - Cancel command request
 */
static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	cancel_delayed_work_sync(&hdev->cmd_timer);
	cancel_delayed_work_sync(&hdev->ncmd_timer);
	atomic_set(&hdev->cmd_cnt, 1);

	hci_cmd_sync_cancel_sync(hdev, err);
}

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Suspend should only act when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to suspend */
	if (mgmt_powering_down(hdev))
		return 0;

	/* Cancel potentially blocking sync operation before suspend */
	hci_cancel_cmd_sync(hdev, EHOSTDOWN);

	hci_req_sync_lock(hdev);
	ret = hci_suspend_sync(hdev);
	hci_req_sync_unlock(hdev);

	hci_clear_wake_reason(hdev);
	mgmt_suspending(hdev, hdev->suspend_state);

	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return ret;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Resume should only act when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to resume */
	if (mgmt_powering_down(hdev))
		return 0;

	hci_req_sync_lock(hdev);
	ret = hci_resume_sync(hdev);
	hci_req_sync_unlock(hdev);

	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
		      hdev->wake_addr_type);

	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return ret;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, hw_err, 3);

	bt_dev_err(hdev, "Injecting HCI hardware error event");

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
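
/* Note: the injected packet above is a complete HCI event, laid out as
 * event code HCI_EV_HARDWARE_ERROR (0x10), parameter length 0x01, and
 * hardware error code 0x00, so the stack reacts exactly as if the
 * controller itself had reported a fatal error.
 */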

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	switch (hci_skb_pkt_type(skb)) {
	case HCI_EVENT_PKT:
		break;
	case HCI_ACLDATA_PKT:
		/* Detect if ISO packet has been sent as ACL */
		if (hci_conn_num(hdev, ISO_LINK)) {
			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
			__u8 type;

			type = hci_conn_lookup_type(hdev, hci_handle(handle));
			if (type == ISO_LINK)
				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
		}
		break;
	case HCI_SCODATA_PKT:
		break;
	case HCI_ISODATA_PKT:
		break;
	default:
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
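
/* RX path sketch (illustrative only; buf and count are assumed driver
 * variables): a transport driver wraps received bytes in an skb, tags
 * the packet type it decoded from the transport framing, and hands the
 * skb to the core.
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, count);
 *	err = hci_recv_frame(hdev, skb);
 */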

void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);

void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
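
/* Callback registration sketch (illustrative only; the l2cap_*_cfm
 * handlers are assumed functions): upper protocols subscribe to
 * connection events with a statically defined struct hci_cb.
 *
 *	static struct hci_cb l2cap_cb = {
 *		.name		= "L2CAP",
 *		.connect_cfm	= l2cap_connect_cfm,
 *		.disconn_cfm	= l2cap_disconn_cfm,
 *	};
 *
 *	hci_register_cb(&l2cap_cb);
 */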

static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
		return err;
	}

	return 0;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

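/* Usage sketch for __hci_cmd_send() below (illustrative only; the OCF
 * and payload are assumptions): only vendor-specific commands (OGF 0x3f)
 * may be sent this way, since no completion event is expected.
 *
 *	u8 param[] = { 0x01 };
 *
 *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			     sizeof(param), param);
 */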
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);

/* Get data from the previously sent command */
static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
		return NULL;

	hdr = (void *)skb->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	return skb->data + HCI_COMMAND_HDR_SIZE;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	void *data;

	/* Check if opcode matches last sent command */
	data = hci_cmd_data(hdev->sent_cmd, opcode);
	if (!data)
		/* Check if opcode matches last request */
		data = hci_cmd_data(hdev->req_skb, opcode);

	return data;
}

/* Get data from last received event */
void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
{
	struct hci_event_hdr *hdr;
	int offset;

	if (!hdev->recv_event)
		return NULL;

	hdr = (void *)hdev->recv_event->data;
	offset = sizeof(*hdr);

	if (hdr->evt != event) {
		/* In case of an LE meta event, check whether the subevent
		 * matches.
		 */
		if (hdr->evt == HCI_EV_LE_META) {
			struct hci_ev_le_meta *ev;

			ev = (void *)hdev->recv_event->data + offset;
			offset += sizeof(*ev);
			if (ev->subevent == event)
				goto found;
		}
		return NULL;
	}

found:
	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	return hdev->recv_event->data + offset;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
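
/* Note on the header layout: hci_handle_pack() folds the 12-bit
 * connection handle and the 4-bit packet boundary/broadcast flags into
 * one 16-bit field, i.e. (handle & 0x0fff) | (flags << 12), matching the
 * wire format of the ACL data packet header.
 */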

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send ISO data */
static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
{
	struct hci_iso_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ISO_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;
	__u16 flags;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;

	list = skb_shinfo(skb)->frag_list;

	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
	hci_add_iso_hdr(skb, conn->handle, flags);

	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		__skb_queue_tail(queue, skb);

		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
						   0x00);
			hci_add_iso_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);
	}
}

void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hci_queue_iso(conn, &conn->data_q, skb);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
{
	struct hci_dev *hdev;
	int cnt, q;

	if (!conn) {
		*quote = 0;
		return;
	}

	hdev = conn->hdev;

	switch (conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	case ISO_LINK:
		cnt = hdev->iso_mtu ? hdev->iso_cnt :
			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
}
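
/* Worked example of the quota math above: with hdev->acl_cnt = 8 free
 * ACL buffers shared by num = 3 busy ACL connections, each connection
 * gets q = 8 / 3 = 2 packets per scheduling round; when cnt / num rounds
 * down to zero, the quote is clamped to 1 so a connection is never
 * starved.
 */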

static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	hci_quote_sent(conn, num, quote);

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			/* hci_disconnect might sleep, so, we have to release
			 * the RCU read lock before calling it.
			 */
			rcu_read_unlock();
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
			rcu_read_lock();
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	hci_quote_sent(chan->conn, num, quote);

	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
{
	unsigned long last_tx;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return;

	switch (type) {
	case LE_LINK:
		last_tx = hdev->le_last_tx;
		break;
	default:
		last_tx = hdev->acl_last_tx;
		break;
	}

	/* tx timeout must be longer than maximum link supervision timeout
	 * (40.9 seconds)
	 */
	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
		hci_link_tx_to(hdev, type);
}
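
/* The 40.9 second figure above comes from the largest link supervision
 * timeout the spec allows: 0xffff slots * 0.625 ms/slot is roughly
 * 40959 ms, so the TX timeout has to sit safely above that.
 */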

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt, ACL_LINK);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	hci_sched_acl_pkt(hdev);
}
3714
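/* LE either has its own buffer pool or shares the ACL pool, depending
 * on what the controller reported: le_pkts != 0 means dedicated LE
 * buffers (le_cnt credits), otherwise LE data spends acl_cnt credits.
 * That is why the leftover cnt is written back to the matching pool
 * at the end of this function.
 */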
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt, LE_LINK);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

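/* ISO credit selection cascades: dedicated ISO buffers (iso_pkts) are
 * preferred, then the LE pool, then the shared ACL pool. cnt is a
 * pointer so the decrement in the loop lands in whichever pool was
 * picked.
 */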
/* Schedule CIS */
static void hci_sched_iso(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, *cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ISO_LINK))
		return;

	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
			(*cnt)--;
		}
	}
}

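/* TX work item: the scheduling order below is deliberate, servicing
 * synchronous (SCO/eSCO) and isochronous traffic before bulk ACL and
 * LE data. Raw packets of unknown type bypass the schedulers and
 * their flow control entirely.
 */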
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_iso(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

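/* The 16-bit handle field of ACL/SCO/ISO headers packs a 12-bit
 * connection handle together with 4 flag bits; hci_handle() and
 * hci_flags() (from hci.h) split them apart. Illustration with a
 * made-up value:
 *
 *	handle = __le16_to_cpu(hdr->handle);	// 0x2abc on the wire
 *	flags  = hci_flags(handle);		// 0x2 (upper 4 bits)
 *	handle = hci_handle(handle);		// 0x0abc (lower 12 bits)
 */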
/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}

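/* SCO reception mirrors the ACL path above, with one addition: the
 * low two flag bits carry the packet status from erroneous data
 * reporting (0 means correctly received), which is stashed in the skb
 * for the SCO layer to consume.
 */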
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		hci_skb_pkt_status(skb) = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
				       handle);
	}

	kfree_skb(skb);
}

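/* Unlike the ACL/SCO handlers above, the ISO handler validates the
 * header length: skb_pull_data() returns NULL when the skb is shorter
 * than the header instead of reading past the end of the buffer.
 */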
static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_iso_hdr *hdr;
	struct hci_conn *conn;
	__u16 handle, flags;

	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {
		bt_dev_err(hdev, "ISO packet too small");
		goto drop;
	}

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
		   handle, flags);

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (!conn) {
		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
			   handle);
		goto drop;
	}

	/* Send to upper protocol */
	iso_recv(conn, skb, flags);
	return;

drop:
	kfree_skb(skb);
}

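/* A request is a batch of commands queued together; the first skb of
 * each batch carries HCI_REQ_START in its control block. The request
 * in flight is therefore complete once the command queue is empty or
 * its head starts a new batch.
 */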
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

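/* Requeue a copy of the last sent command; used below to recover from
 * controllers that emit a spontaneous reset-complete event during
 * init. A reset itself is never resent.
 */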
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

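/* Resolve the completion callback for @opcode: at most one of
 * @req_complete and @req_complete_skb is set, and only once the
 * request containing the command has finished (a failure status
 * aborts the remainder of the request).
 */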
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	skb = hdev->req_skb;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->req_skb instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		return;
	}

	if (skb && bt_cb(skb)->hci.req_complete) {
		*req_complete = bt_cb(skb)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		dev_kfree_skb_irq(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions are used to collect packet parsing
	 * coverage information from this background thread and to associate
	 * the coverage with the syscall's thread which originally injected
	 * the packet. This helps fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device.
		 * When the device is in HCI_INIT, we still need to pass
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

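/* Hand one command skb to the driver. A clone is kept in sent_cmd so
 * the matching command-complete event can be paired with what was
 * sent, and another clone lands in req_skb when a synchronous request
 * is waiting on the result. Every transmitted command consumes one
 * credit from the controller's command buffer count (cmd_cnt).
 */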
static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	bt_dev_dbg(hdev, "skb %p", skb);

	kfree_skb(hdev->sent_cmd);

	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
	if (!hdev->sent_cmd) {
		skb_queue_head(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
		return;
	}

	err = hci_send_frame(hdev, skb);
	if (err < 0) {
		hci_cmd_sync_cancel_sync(hdev, -err);
		return;
	}

	if (hci_req_status_pend(hdev) &&
	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
		kfree_skb(hdev->req_skb);
		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	}

	atomic_dec(&hdev->cmd_cnt);
}

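/* Command work item: drains cmd_q one command at a time, gated by
 * cmd_cnt. The delayed cmd_timer re-armed here fires after
 * HCI_CMD_TIMEOUT if no completion arrives, except while a reset or
 * an explicit workqueue drain is in progress.
 */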
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		hci_send_cmd_sync(hdev, skb);

		rcu_read_lock();
		if (test_bit(HCI_RESET, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
			cancel_delayed_work(&hdev->cmd_timer);
		else
			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
					   HCI_CMD_TIMEOUT);
		rcu_read_unlock();
	}
}