Bluetooth: hci_qca: Stop IBS timer during BT OFF
[linux-2.6-block.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
8c520a59 29#include <linux/rfkill.h>
baf27f6e 30#include <linux/debugfs.h>
99780a7b 31#include <linux/crypto.h>
7a0e5b15 32#include <linux/property.h>
9952d90e
APS
33#include <linux/suspend.h>
34#include <linux/wait.h>
47219839 35#include <asm/unaligned.h>
1da177e4
LT
36
37#include <net/bluetooth/bluetooth.h>
38#include <net/bluetooth/hci_core.h>
4bc58f51 39#include <net/bluetooth/l2cap.h>
af58925c 40#include <net/bluetooth/mgmt.h>
1da177e4 41
0857dd3b 42#include "hci_request.h"
60c5f5fb 43#include "hci_debugfs.h"
970c4e46 44#include "smp.h"
6d5d2ee6 45#include "leds.h"
145373cb 46#include "msft.h"
f67743f9 47#include "aosp.h"
8961987f 48#include "hci_codec.h"
970c4e46 49
b78752cc 50static void hci_rx_work(struct work_struct *work);
c347b765 51static void hci_cmd_work(struct work_struct *work);
3eff45ea 52static void hci_tx_work(struct work_struct *work);
1da177e4 53
1da177e4
LT
54/* HCI device list */
55LIST_HEAD(hci_dev_list);
56DEFINE_RWLOCK(hci_dev_list_lock);
57
58/* HCI callback list */
59LIST_HEAD(hci_cb_list);
fba7ecf0 60DEFINE_MUTEX(hci_cb_list_lock);
1da177e4 61
3df92b31
SL
62/* HCI ID Numbering */
63static DEFINE_IDA(hci_index_ida);
64
a1d01db1 65static int hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
66{
67 __u8 scan = opt;
68
42c6b129 69 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
70
71 /* Inquiry and Page scans */
42c6b129 72 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
a1d01db1 73 return 0;
1da177e4
LT
74}
75
a1d01db1 76static int hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
77{
78 __u8 auth = opt;
79
42c6b129 80 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
81
82 /* Authentication */
42c6b129 83 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
a1d01db1 84 return 0;
1da177e4
LT
85}
86
a1d01db1 87static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
88{
89 __u8 encrypt = opt;
90
42c6b129 91 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 92
e4e8e37c 93 /* Encryption */
42c6b129 94 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
a1d01db1 95 return 0;
1da177e4
LT
96}
97
a1d01db1 98static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
99{
100 __le16 policy = cpu_to_le16(opt);
101
42c6b129 102 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
103
104 /* Default link policy */
42c6b129 105 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
a1d01db1 106 return 0;
e4e8e37c
MH
107}
108
8e87d142 109/* Get HCI device by index.
1da177e4
LT
110 * Device is held on return. */
111struct hci_dev *hci_dev_get(int index)
112{
8035ded4 113 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
114
115 BT_DBG("%d", index);
116
117 if (index < 0)
118 return NULL;
119
120 read_lock(&hci_dev_list_lock);
8035ded4 121 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
122 if (d->id == index) {
123 hdev = hci_dev_hold(d);
124 break;
125 }
126 }
127 read_unlock(&hci_dev_list_lock);
128 return hdev;
129}
1da177e4
LT
130
131/* ---- Inquiry support ---- */
ff9ef578 132
30dc78e1
JH
133bool hci_discovery_active(struct hci_dev *hdev)
134{
135 struct discovery_state *discov = &hdev->discovery;
136
6fbe195d 137 switch (discov->state) {
343f935b 138 case DISCOVERY_FINDING:
6fbe195d 139 case DISCOVERY_RESOLVING:
30dc78e1
JH
140 return true;
141
6fbe195d
AG
142 default:
143 return false;
144 }
30dc78e1
JH
145}
146
ff9ef578
JH
147void hci_discovery_set_state(struct hci_dev *hdev, int state)
148{
bb3e0a33
JH
149 int old_state = hdev->discovery.state;
150
ff9ef578
JH
151 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
152
bb3e0a33 153 if (old_state == state)
ff9ef578
JH
154 return;
155
bb3e0a33
JH
156 hdev->discovery.state = state;
157
ff9ef578
JH
158 switch (state) {
159 case DISCOVERY_STOPPED:
5bee2fd6 160 hci_update_passive_scan(hdev);
c54c3860 161
bb3e0a33 162 if (old_state != DISCOVERY_STARTING)
7b99b659 163 mgmt_discovering(hdev, 0);
ff9ef578
JH
164 break;
165 case DISCOVERY_STARTING:
166 break;
343f935b 167 case DISCOVERY_FINDING:
ff9ef578
JH
168 mgmt_discovering(hdev, 1);
169 break;
30dc78e1
JH
170 case DISCOVERY_RESOLVING:
171 break;
ff9ef578
JH
172 case DISCOVERY_STOPPING:
173 break;
174 }
ff9ef578
JH
175}
176
1f9b9a5d 177void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 178{
30883512 179 struct discovery_state *cache = &hdev->discovery;
b57c1a56 180 struct inquiry_entry *p, *n;
1da177e4 181
561aafbc
JH
182 list_for_each_entry_safe(p, n, &cache->all, all) {
183 list_del(&p->all);
b57c1a56 184 kfree(p);
1da177e4 185 }
561aafbc
JH
186
187 INIT_LIST_HEAD(&cache->unknown);
188 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
189}
190
a8c5fb1a
GP
191struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
192 bdaddr_t *bdaddr)
1da177e4 193{
30883512 194 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
195 struct inquiry_entry *e;
196
6ed93dc6 197 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 198
561aafbc
JH
199 list_for_each_entry(e, &cache->all, all) {
200 if (!bacmp(&e->data.bdaddr, bdaddr))
201 return e;
202 }
203
204 return NULL;
205}
206
207struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 208 bdaddr_t *bdaddr)
561aafbc 209{
30883512 210 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
211 struct inquiry_entry *e;
212
6ed93dc6 213 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
214
215 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 216 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
217 return e;
218 }
219
220 return NULL;
1da177e4
LT
221}
222
30dc78e1 223struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
224 bdaddr_t *bdaddr,
225 int state)
30dc78e1
JH
226{
227 struct discovery_state *cache = &hdev->discovery;
228 struct inquiry_entry *e;
229
6ed93dc6 230 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
231
232 list_for_each_entry(e, &cache->resolve, list) {
233 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
234 return e;
235 if (!bacmp(&e->data.bdaddr, bdaddr))
236 return e;
237 }
238
239 return NULL;
240}
241
a3d4e20a 242void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 243 struct inquiry_entry *ie)
a3d4e20a
JH
244{
245 struct discovery_state *cache = &hdev->discovery;
246 struct list_head *pos = &cache->resolve;
247 struct inquiry_entry *p;
248
249 list_del(&ie->list);
250
251 list_for_each_entry(p, &cache->resolve, list) {
252 if (p->name_state != NAME_PENDING &&
a8c5fb1a 253 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
254 break;
255 pos = &p->list;
256 }
257
258 list_add(&ie->list, pos);
259}
260
af58925c
MH
261u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
262 bool name_known)
1da177e4 263{
30883512 264 struct discovery_state *cache = &hdev->discovery;
70f23020 265 struct inquiry_entry *ie;
af58925c 266 u32 flags = 0;
1da177e4 267
6ed93dc6 268 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 269
6928a924 270 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
2b2fec4d 271
af58925c
MH
272 if (!data->ssp_mode)
273 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 274
70f23020 275 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 276 if (ie) {
af58925c
MH
277 if (!ie->data.ssp_mode)
278 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 279
a3d4e20a 280 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 281 data->rssi != ie->data.rssi) {
a3d4e20a
JH
282 ie->data.rssi = data->rssi;
283 hci_inquiry_cache_update_resolve(hdev, ie);
284 }
285
561aafbc 286 goto update;
a3d4e20a 287 }
561aafbc
JH
288
289 /* Entry not in the cache. Add new one. */
27f70f3e 290 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
af58925c
MH
291 if (!ie) {
292 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
293 goto done;
294 }
561aafbc
JH
295
296 list_add(&ie->all, &cache->all);
297
298 if (name_known) {
299 ie->name_state = NAME_KNOWN;
300 } else {
301 ie->name_state = NAME_NOT_KNOWN;
302 list_add(&ie->list, &cache->unknown);
303 }
70f23020 304
561aafbc
JH
305update:
306 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 307 ie->name_state != NAME_PENDING) {
561aafbc
JH
308 ie->name_state = NAME_KNOWN;
309 list_del(&ie->list);
1da177e4
LT
310 }
311
70f23020
AE
312 memcpy(&ie->data, data, sizeof(*data));
313 ie->timestamp = jiffies;
1da177e4 314 cache->timestamp = jiffies;
3175405b
JH
315
316 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 317 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 318
af58925c
MH
319done:
320 return flags;
1da177e4
LT
321}
322
323static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
324{
30883512 325 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
326 struct inquiry_info *info = (struct inquiry_info *) buf;
327 struct inquiry_entry *e;
328 int copied = 0;
329
561aafbc 330 list_for_each_entry(e, &cache->all, all) {
1da177e4 331 struct inquiry_data *data = &e->data;
b57c1a56
JH
332
333 if (copied >= num)
334 break;
335
1da177e4
LT
336 bacpy(&info->bdaddr, &data->bdaddr);
337 info->pscan_rep_mode = data->pscan_rep_mode;
338 info->pscan_period_mode = data->pscan_period_mode;
339 info->pscan_mode = data->pscan_mode;
340 memcpy(info->dev_class, data->dev_class, 3);
341 info->clock_offset = data->clock_offset;
b57c1a56 342
1da177e4 343 info++;
b57c1a56 344 copied++;
1da177e4
LT
345 }
346
347 BT_DBG("cache %p, copied %d", cache, copied);
348 return copied;
349}
350
a1d01db1 351static int hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
352{
353 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 354 struct hci_dev *hdev = req->hdev;
1da177e4
LT
355 struct hci_cp_inquiry cp;
356
357 BT_DBG("%s", hdev->name);
358
359 if (test_bit(HCI_INQUIRY, &hdev->flags))
a1d01db1 360 return 0;
1da177e4
LT
361
362 /* Start Inquiry */
363 memcpy(&cp.lap, &ir->lap, 3);
364 cp.length = ir->length;
365 cp.num_rsp = ir->num_rsp;
42c6b129 366 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
a1d01db1
JH
367
368 return 0;
1da177e4
LT
369}
370
371int hci_inquiry(void __user *arg)
372{
373 __u8 __user *ptr = arg;
374 struct hci_inquiry_req ir;
375 struct hci_dev *hdev;
376 int err = 0, do_inquiry = 0, max_rsp;
377 long timeo;
378 __u8 *buf;
379
380 if (copy_from_user(&ir, ptr, sizeof(ir)))
381 return -EFAULT;
382
5a08ecce
AE
383 hdev = hci_dev_get(ir.dev_id);
384 if (!hdev)
1da177e4
LT
385 return -ENODEV;
386
d7a5a11d 387 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
388 err = -EBUSY;
389 goto done;
390 }
391
d7a5a11d 392 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
393 err = -EOPNOTSUPP;
394 goto done;
395 }
396
ca8bee5d 397 if (hdev->dev_type != HCI_PRIMARY) {
5b69bef5
MH
398 err = -EOPNOTSUPP;
399 goto done;
400 }
401
d7a5a11d 402 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
403 err = -EOPNOTSUPP;
404 goto done;
405 }
406
f41a4b2b
PS
407 /* Restrict maximum inquiry length to 60 seconds */
408 if (ir.length > 60) {
409 err = -EINVAL;
410 goto done;
411 }
412
09fd0de5 413 hci_dev_lock(hdev);
8e87d142 414 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 415 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 416 hci_inquiry_cache_flush(hdev);
1da177e4
LT
417 do_inquiry = 1;
418 }
09fd0de5 419 hci_dev_unlock(hdev);
1da177e4 420
04837f64 421 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
422
423 if (do_inquiry) {
01178cd4 424 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
4ebeee2d 425 timeo, NULL);
70f23020
AE
426 if (err < 0)
427 goto done;
3e13fa1e
AG
428
429 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
430 * cleared). If it is interrupted by a signal, return -EINTR.
431 */
74316201 432 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
28a758c8
PB
433 TASK_INTERRUPTIBLE)) {
434 err = -EINTR;
435 goto done;
436 }
70f23020 437 }
1da177e4 438
8fc9ced3
GP
439 /* for unlimited number of responses we will use buffer with
440 * 255 entries
441 */
1da177e4
LT
442 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
443
444 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
445 * copy it to the user space.
446 */
6da2ec56 447 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
70f23020 448 if (!buf) {
1da177e4
LT
449 err = -ENOMEM;
450 goto done;
451 }
452
09fd0de5 453 hci_dev_lock(hdev);
1da177e4 454 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 455 hci_dev_unlock(hdev);
1da177e4
LT
456
457 BT_DBG("num_rsp %d", ir.num_rsp);
458
459 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
460 ptr += sizeof(ir);
461 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 462 ir.num_rsp))
1da177e4 463 err = -EFAULT;
8e87d142 464 } else
1da177e4
LT
465 err = -EFAULT;
466
467 kfree(buf);
468
469done:
470 hci_dev_put(hdev);
471 return err;
472}
473
cf75ad8b
LAD
474static int hci_dev_do_open(struct hci_dev *hdev)
475{
476 int ret = 0;
477
478 BT_DBG("%s %p", hdev->name, hdev);
479
480 hci_req_sync_lock(hdev);
481
482 ret = hci_dev_open_sync(hdev);
483
b504430c 484 hci_req_sync_unlock(hdev);
1da177e4
LT
485 return ret;
486}
487
cbed0ca1
JH
488/* ---- HCI ioctl helpers ---- */
489
490int hci_dev_open(__u16 dev)
491{
492 struct hci_dev *hdev;
493 int err;
494
495 hdev = hci_dev_get(dev);
496 if (!hdev)
497 return -ENODEV;
498
4a964404 499 /* Devices that are marked as unconfigured can only be powered
fee746b0
MH
500 * up as user channel. Trying to bring them up as normal devices
501 * will result into a failure. Only user channel operation is
502 * possible.
503 *
504 * When this function is called for a user channel, the flag
505 * HCI_USER_CHANNEL will be set first before attempting to
506 * open the device.
507 */
d7a5a11d
MH
508 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
509 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
fee746b0
MH
510 err = -EOPNOTSUPP;
511 goto done;
512 }
513
e1d08f40
JH
514 /* We need to ensure that no other power on/off work is pending
515 * before proceeding to call hci_dev_do_open. This is
516 * particularly important if the setup procedure has not yet
517 * completed.
518 */
a69d8927 519 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
e1d08f40
JH
520 cancel_delayed_work(&hdev->power_off);
521
a5c8f270
MH
522 /* After this call it is guaranteed that the setup procedure
523 * has finished. This means that error conditions like RFKILL
524 * or no valid public or static random address apply.
525 */
e1d08f40
JH
526 flush_workqueue(hdev->req_workqueue);
527
12aa4f0a 528 /* For controllers not using the management interface and that
b6ae8457 529 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
12aa4f0a
MH
530 * so that pairing works for them. Once the management interface
531 * is in use this bit will be cleared again and userspace has
532 * to explicitly enable it.
533 */
d7a5a11d
MH
534 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
535 !hci_dev_test_flag(hdev, HCI_MGMT))
a1536da2 536 hci_dev_set_flag(hdev, HCI_BONDABLE);
12aa4f0a 537
cbed0ca1
JH
538 err = hci_dev_do_open(hdev);
539
fee746b0 540done:
cbed0ca1 541 hci_dev_put(hdev);
cbed0ca1
JH
542 return err;
543}
544
cf75ad8b
LAD
545int hci_dev_do_close(struct hci_dev *hdev)
546{
547 int err;
548
549 BT_DBG("%s %p", hdev->name, hdev);
550
551 hci_req_sync_lock(hdev);
552
553 err = hci_dev_close_sync(hdev);
554
b504430c 555 hci_req_sync_unlock(hdev);
1da177e4 556
61969ef8 557 return err;
1da177e4
LT
558}
559
560int hci_dev_close(__u16 dev)
561{
562 struct hci_dev *hdev;
563 int err;
564
70f23020
AE
565 hdev = hci_dev_get(dev);
566 if (!hdev)
1da177e4 567 return -ENODEV;
8ee56540 568
d7a5a11d 569 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
570 err = -EBUSY;
571 goto done;
572 }
573
a69d8927 574 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
8ee56540
MH
575 cancel_delayed_work(&hdev->power_off);
576
1da177e4 577 err = hci_dev_do_close(hdev);
8ee56540 578
0736cfa8 579done:
1da177e4
LT
580 hci_dev_put(hdev);
581 return err;
582}
583
5c912495 584static int hci_dev_do_reset(struct hci_dev *hdev)
1da177e4 585{
5c912495 586 int ret;
1da177e4 587
5c912495 588 BT_DBG("%s %p", hdev->name, hdev);
1da177e4 589
b504430c 590 hci_req_sync_lock(hdev);
1da177e4 591
1da177e4
LT
592 /* Drop queues */
593 skb_queue_purge(&hdev->rx_q);
594 skb_queue_purge(&hdev->cmd_q);
595
76727c02
JH
596 /* Avoid potential lockdep warnings from the *_flush() calls by
597 * ensuring the workqueue is empty up front.
598 */
599 drain_workqueue(hdev->workqueue);
600
09fd0de5 601 hci_dev_lock(hdev);
1f9b9a5d 602 hci_inquiry_cache_flush(hdev);
1da177e4 603 hci_conn_hash_flush(hdev);
09fd0de5 604 hci_dev_unlock(hdev);
1da177e4
LT
605
606 if (hdev->flush)
607 hdev->flush(hdev);
608
8e87d142 609 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 610 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 611
d0b13706 612 ret = hci_reset_sync(hdev);
1da177e4 613
b504430c 614 hci_req_sync_unlock(hdev);
1da177e4
LT
615 return ret;
616}
617
5c912495
MH
618int hci_dev_reset(__u16 dev)
619{
620 struct hci_dev *hdev;
621 int err;
622
623 hdev = hci_dev_get(dev);
624 if (!hdev)
625 return -ENODEV;
626
627 if (!test_bit(HCI_UP, &hdev->flags)) {
628 err = -ENETDOWN;
629 goto done;
630 }
631
d7a5a11d 632 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5c912495
MH
633 err = -EBUSY;
634 goto done;
635 }
636
d7a5a11d 637 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5c912495
MH
638 err = -EOPNOTSUPP;
639 goto done;
640 }
641
642 err = hci_dev_do_reset(hdev);
643
644done:
645 hci_dev_put(hdev);
646 return err;
647}
648
1da177e4
LT
649int hci_dev_reset_stat(__u16 dev)
650{
651 struct hci_dev *hdev;
652 int ret = 0;
653
70f23020
AE
654 hdev = hci_dev_get(dev);
655 if (!hdev)
1da177e4
LT
656 return -ENODEV;
657
d7a5a11d 658 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
659 ret = -EBUSY;
660 goto done;
661 }
662
d7a5a11d 663 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
664 ret = -EOPNOTSUPP;
665 goto done;
666 }
667
1da177e4
LT
668 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
669
0736cfa8 670done:
1da177e4 671 hci_dev_put(hdev);
1da177e4
LT
672 return ret;
673}
674
5bee2fd6 675static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
123abc08 676{
bc6d2d04 677 bool conn_changed, discov_changed;
123abc08
JH
678
679 BT_DBG("%s scan 0x%02x", hdev->name, scan);
680
681 if ((scan & SCAN_PAGE))
238be788
MH
682 conn_changed = !hci_dev_test_and_set_flag(hdev,
683 HCI_CONNECTABLE);
123abc08 684 else
a69d8927
MH
685 conn_changed = hci_dev_test_and_clear_flag(hdev,
686 HCI_CONNECTABLE);
123abc08 687
bc6d2d04 688 if ((scan & SCAN_INQUIRY)) {
238be788
MH
689 discov_changed = !hci_dev_test_and_set_flag(hdev,
690 HCI_DISCOVERABLE);
bc6d2d04 691 } else {
a358dc11 692 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
a69d8927
MH
693 discov_changed = hci_dev_test_and_clear_flag(hdev,
694 HCI_DISCOVERABLE);
bc6d2d04
JH
695 }
696
d7a5a11d 697 if (!hci_dev_test_flag(hdev, HCI_MGMT))
123abc08
JH
698 return;
699
bc6d2d04
JH
700 if (conn_changed || discov_changed) {
701 /* In case this was disabled through mgmt */
a1536da2 702 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
bc6d2d04 703
d7a5a11d 704 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
cab054ab 705 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
bc6d2d04 706
123abc08 707 mgmt_new_settings(hdev);
bc6d2d04 708 }
123abc08
JH
709}
710
1da177e4
LT
711int hci_dev_cmd(unsigned int cmd, void __user *arg)
712{
713 struct hci_dev *hdev;
714 struct hci_dev_req dr;
715 int err = 0;
716
717 if (copy_from_user(&dr, arg, sizeof(dr)))
718 return -EFAULT;
719
70f23020
AE
720 hdev = hci_dev_get(dr.dev_id);
721 if (!hdev)
1da177e4
LT
722 return -ENODEV;
723
d7a5a11d 724 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
725 err = -EBUSY;
726 goto done;
727 }
728
d7a5a11d 729 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
730 err = -EOPNOTSUPP;
731 goto done;
732 }
733
ca8bee5d 734 if (hdev->dev_type != HCI_PRIMARY) {
5b69bef5
MH
735 err = -EOPNOTSUPP;
736 goto done;
737 }
738
d7a5a11d 739 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
740 err = -EOPNOTSUPP;
741 goto done;
742 }
743
1da177e4
LT
744 switch (cmd) {
745 case HCISETAUTH:
01178cd4 746 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
4ebeee2d 747 HCI_INIT_TIMEOUT, NULL);
1da177e4
LT
748 break;
749
750 case HCISETENCRYPT:
751 if (!lmp_encrypt_capable(hdev)) {
752 err = -EOPNOTSUPP;
753 break;
754 }
755
756 if (!test_bit(HCI_AUTH, &hdev->flags)) {
757 /* Auth must be enabled first */
01178cd4 758 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
4ebeee2d 759 HCI_INIT_TIMEOUT, NULL);
1da177e4
LT
760 if (err)
761 break;
762 }
763
01178cd4 764 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
4ebeee2d 765 HCI_INIT_TIMEOUT, NULL);
1da177e4
LT
766 break;
767
768 case HCISETSCAN:
01178cd4 769 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
4ebeee2d 770 HCI_INIT_TIMEOUT, NULL);
91a668b0 771
bc6d2d04
JH
772 /* Ensure that the connectable and discoverable states
773 * get correctly modified as this was a non-mgmt change.
91a668b0 774 */
123abc08 775 if (!err)
5bee2fd6 776 hci_update_passive_scan_state(hdev, dr.dev_opt);
1da177e4
LT
777 break;
778
1da177e4 779 case HCISETLINKPOL:
01178cd4 780 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
4ebeee2d 781 HCI_INIT_TIMEOUT, NULL);
1da177e4
LT
782 break;
783
784 case HCISETLINKMODE:
e4e8e37c
MH
785 hdev->link_mode = ((__u16) dr.dev_opt) &
786 (HCI_LM_MASTER | HCI_LM_ACCEPT);
787 break;
788
789 case HCISETPTYPE:
b7c23df8
JK
790 if (hdev->pkt_type == (__u16) dr.dev_opt)
791 break;
792
e4e8e37c 793 hdev->pkt_type = (__u16) dr.dev_opt;
b7c23df8 794 mgmt_phy_configuration_changed(hdev, NULL);
1da177e4
LT
795 break;
796
797 case HCISETACLMTU:
e4e8e37c
MH
798 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
799 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
800 break;
801
802 case HCISETSCOMTU:
e4e8e37c
MH
803 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
804 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
805 break;
806
807 default:
808 err = -EINVAL;
809 break;
810 }
e4e8e37c 811
0736cfa8 812done:
1da177e4
LT
813 hci_dev_put(hdev);
814 return err;
815}
816
817int hci_get_dev_list(void __user *arg)
818{
8035ded4 819 struct hci_dev *hdev;
1da177e4
LT
820 struct hci_dev_list_req *dl;
821 struct hci_dev_req *dr;
1da177e4
LT
822 int n = 0, size, err;
823 __u16 dev_num;
824
825 if (get_user(dev_num, (__u16 __user *) arg))
826 return -EFAULT;
827
828 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
829 return -EINVAL;
830
831 size = sizeof(*dl) + dev_num * sizeof(*dr);
832
70f23020
AE
833 dl = kzalloc(size, GFP_KERNEL);
834 if (!dl)
1da177e4
LT
835 return -ENOMEM;
836
837 dr = dl->dev_req;
838
f20d09d5 839 read_lock(&hci_dev_list_lock);
8035ded4 840 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 841 unsigned long flags = hdev->flags;
c542a06c 842
2e84d8db
MH
843 /* When the auto-off is configured it means the transport
844 * is running, but in that case still indicate that the
845 * device is actually down.
846 */
d7a5a11d 847 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db 848 flags &= ~BIT(HCI_UP);
c542a06c 849
1da177e4 850 (dr + n)->dev_id = hdev->id;
2e84d8db 851 (dr + n)->dev_opt = flags;
c542a06c 852
1da177e4
LT
853 if (++n >= dev_num)
854 break;
855 }
f20d09d5 856 read_unlock(&hci_dev_list_lock);
1da177e4
LT
857
858 dl->dev_num = n;
859 size = sizeof(*dl) + n * sizeof(*dr);
860
861 err = copy_to_user(arg, dl, size);
862 kfree(dl);
863
864 return err ? -EFAULT : 0;
865}
866
867int hci_get_dev_info(void __user *arg)
868{
869 struct hci_dev *hdev;
870 struct hci_dev_info di;
2e84d8db 871 unsigned long flags;
1da177e4
LT
872 int err = 0;
873
874 if (copy_from_user(&di, arg, sizeof(di)))
875 return -EFAULT;
876
70f23020
AE
877 hdev = hci_dev_get(di.dev_id);
878 if (!hdev)
1da177e4
LT
879 return -ENODEV;
880
2e84d8db
MH
881 /* When the auto-off is configured it means the transport
882 * is running, but in that case still indicate that the
883 * device is actually down.
884 */
d7a5a11d 885 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db
MH
886 flags = hdev->flags & ~BIT(HCI_UP);
887 else
888 flags = hdev->flags;
c542a06c 889
1da177e4
LT
890 strcpy(di.name, hdev->name);
891 di.bdaddr = hdev->bdaddr;
60f2a3ed 892 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 893 di.flags = flags;
1da177e4 894 di.pkt_type = hdev->pkt_type;
572c7f84
JH
895 if (lmp_bredr_capable(hdev)) {
896 di.acl_mtu = hdev->acl_mtu;
897 di.acl_pkts = hdev->acl_pkts;
898 di.sco_mtu = hdev->sco_mtu;
899 di.sco_pkts = hdev->sco_pkts;
900 } else {
901 di.acl_mtu = hdev->le_mtu;
902 di.acl_pkts = hdev->le_pkts;
903 di.sco_mtu = 0;
904 di.sco_pkts = 0;
905 }
1da177e4
LT
906 di.link_policy = hdev->link_policy;
907 di.link_mode = hdev->link_mode;
908
909 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
910 memcpy(&di.features, &hdev->features, sizeof(di.features));
911
912 if (copy_to_user(arg, &di, sizeof(di)))
913 err = -EFAULT;
914
915 hci_dev_put(hdev);
916
917 return err;
918}
919
920/* ---- Interface to HCI drivers ---- */
921
611b30f7
MH
922static int hci_rfkill_set_block(void *data, bool blocked)
923{
924 struct hci_dev *hdev = data;
925
926 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
927
d7a5a11d 928 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
0736cfa8
MH
929 return -EBUSY;
930
5e130367 931 if (blocked) {
a1536da2 932 hci_dev_set_flag(hdev, HCI_RFKILLED);
d7a5a11d
MH
933 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
934 !hci_dev_test_flag(hdev, HCI_CONFIG))
bf543036 935 hci_dev_do_close(hdev);
5e130367 936 } else {
a358dc11 937 hci_dev_clear_flag(hdev, HCI_RFKILLED);
1025c04c 938 }
611b30f7
MH
939
940 return 0;
941}
942
943static const struct rfkill_ops hci_rfkill_ops = {
944 .set_block = hci_rfkill_set_block,
945};
946
ab81cbf9
JH
947static void hci_power_on(struct work_struct *work)
948{
949 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 950 int err;
ab81cbf9
JH
951
952 BT_DBG("%s", hdev->name);
953
2ff13894
JH
954 if (test_bit(HCI_UP, &hdev->flags) &&
955 hci_dev_test_flag(hdev, HCI_MGMT) &&
956 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
d82142a8 957 cancel_delayed_work(&hdev->power_off);
cf75ad8b 958 err = hci_powered_update_sync(hdev);
2ff13894
JH
959 mgmt_power_on(hdev, err);
960 return;
961 }
962
cbed0ca1 963 err = hci_dev_do_open(hdev);
96570ffc 964 if (err < 0) {
3ad67582 965 hci_dev_lock(hdev);
96570ffc 966 mgmt_set_powered_failed(hdev, err);
3ad67582 967 hci_dev_unlock(hdev);
ab81cbf9 968 return;
96570ffc 969 }
ab81cbf9 970
a5c8f270
MH
971 /* During the HCI setup phase, a few error conditions are
972 * ignored and they need to be checked now. If they are still
973 * valid, it is important to turn the device back off.
974 */
d7a5a11d
MH
975 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
976 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
ca8bee5d 977 (hdev->dev_type == HCI_PRIMARY &&
a5c8f270
MH
978 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
979 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
a358dc11 980 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
bf543036 981 hci_dev_do_close(hdev);
d7a5a11d 982 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
19202573
JH
983 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
984 HCI_AUTO_OFF_TIMEOUT);
bf543036 985 }
ab81cbf9 986
a69d8927 987 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
4a964404
MH
988 /* For unconfigured devices, set the HCI_RAW flag
989 * so that userspace can easily identify them.
4a964404 990 */
d7a5a11d 991 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4a964404 992 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
993
994 /* For fully configured devices, this will send
995 * the Index Added event. For unconfigured devices,
996 * it will send Unconfigued Index Added event.
997 *
998 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
999 * and no event will be send.
1000 */
1001 mgmt_index_added(hdev);
a69d8927 1002 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5ea234d3
MH
1003 /* When the controller is now configured, then it
1004 * is important to clear the HCI_RAW flag.
1005 */
d7a5a11d 1006 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5ea234d3
MH
1007 clear_bit(HCI_RAW, &hdev->flags);
1008
d603b76b
MH
1009 /* Powering on the controller with HCI_CONFIG set only
1010 * happens with the transition from unconfigured to
1011 * configured. This will send the Index Added event.
1012 */
744cf19e 1013 mgmt_index_added(hdev);
fee746b0 1014 }
ab81cbf9
JH
1015}
1016
1017static void hci_power_off(struct work_struct *work)
1018{
3243553f 1019 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 1020 power_off.work);
ab81cbf9
JH
1021
1022 BT_DBG("%s", hdev->name);
1023
8ee56540 1024 hci_dev_do_close(hdev);
ab81cbf9
JH
1025}
1026
c7741d16
MH
1027static void hci_error_reset(struct work_struct *work)
1028{
1029 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1030
1031 BT_DBG("%s", hdev->name);
1032
1033 if (hdev->hw_error)
1034 hdev->hw_error(hdev, hdev->hw_error_code);
1035 else
2064ee33 1036 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
c7741d16
MH
1037
1038 if (hci_dev_do_close(hdev))
1039 return;
1040
c7741d16
MH
1041 hci_dev_do_open(hdev);
1042}
1043
35f7498a 1044void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 1045{
4821002c 1046 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1047
4821002c
JH
1048 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1049 list_del(&uuid->list);
2aeb9a1a
JH
1050 kfree(uuid);
1051 }
2aeb9a1a
JH
1052}
1053
35f7498a 1054void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1 1055{
0378b597 1056 struct link_key *key;
55ed8ca1 1057
d7d41682 1058 list_for_each_entry(key, &hdev->link_keys, list) {
0378b597
JH
1059 list_del_rcu(&key->list);
1060 kfree_rcu(key, rcu);
55ed8ca1 1061 }
55ed8ca1
JH
1062}
1063
35f7498a 1064void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 1065{
970d0f1b 1066 struct smp_ltk *k;
b899efaf 1067
d7d41682 1068 list_for_each_entry(k, &hdev->long_term_keys, list) {
970d0f1b
JH
1069 list_del_rcu(&k->list);
1070 kfree_rcu(k, rcu);
b899efaf 1071 }
b899efaf
VCG
1072}
1073
970c4e46
JH
1074void hci_smp_irks_clear(struct hci_dev *hdev)
1075{
adae20cb 1076 struct smp_irk *k;
970c4e46 1077
d7d41682 1078 list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
adae20cb
JH
1079 list_del_rcu(&k->list);
1080 kfree_rcu(k, rcu);
970c4e46
JH
1081 }
1082}
1083
600a8749
AM
1084void hci_blocked_keys_clear(struct hci_dev *hdev)
1085{
1086 struct blocked_key *b;
1087
d7d41682 1088 list_for_each_entry(b, &hdev->blocked_keys, list) {
600a8749
AM
1089 list_del_rcu(&b->list);
1090 kfree_rcu(b, rcu);
1091 }
1092}
1093
1094bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1095{
1096 bool blocked = false;
1097 struct blocked_key *b;
1098
1099 rcu_read_lock();
0c2ac7d4 1100 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
600a8749
AM
1101 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1102 blocked = true;
1103 break;
1104 }
1105 }
1106
1107 rcu_read_unlock();
1108 return blocked;
1109}
1110
55ed8ca1
JH
1111struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1112{
8035ded4 1113 struct link_key *k;
55ed8ca1 1114
0378b597
JH
1115 rcu_read_lock();
1116 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1117 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1118 rcu_read_unlock();
600a8749
AM
1119
1120 if (hci_is_blocked_key(hdev,
1121 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1122 k->val)) {
1123 bt_dev_warn_ratelimited(hdev,
1124 "Link key blocked for %pMR",
1125 &k->bdaddr);
1126 return NULL;
1127 }
1128
55ed8ca1 1129 return k;
0378b597
JH
1130 }
1131 }
1132 rcu_read_unlock();
55ed8ca1
JH
1133
1134 return NULL;
1135}
1136
745c0ce3 1137static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1138 u8 key_type, u8 old_key_type)
d25e28ab
JH
1139{
1140 /* Legacy key */
1141 if (key_type < 0x03)
745c0ce3 1142 return true;
d25e28ab
JH
1143
1144 /* Debug keys are insecure so don't store them persistently */
1145 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1146 return false;
d25e28ab
JH
1147
1148 /* Changed combination key and there's no previous one */
1149 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1150 return false;
d25e28ab
JH
1151
1152 /* Security mode 3 case */
1153 if (!conn)
745c0ce3 1154 return true;
d25e28ab 1155
e3befab9
JH
1156 /* BR/EDR key derived using SC from an LE link */
1157 if (conn->type == LE_LINK)
1158 return true;
1159
d25e28ab
JH
1160 /* Neither local nor remote side had no-bonding as requirement */
1161 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1162 return true;
d25e28ab
JH
1163
1164 /* Local side had dedicated bonding as requirement */
1165 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1166 return true;
d25e28ab
JH
1167
1168 /* Remote side had dedicated bonding as requirement */
1169 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1170 return true;
d25e28ab
JH
1171
1172 /* If none of the above criteria match, then don't store the key
1173 * persistently */
745c0ce3 1174 return false;
d25e28ab
JH
1175}
1176
e804d25d 1177static u8 ltk_role(u8 type)
98a0b845 1178{
e804d25d
JH
1179 if (type == SMP_LTK)
1180 return HCI_ROLE_MASTER;
98a0b845 1181
e804d25d 1182 return HCI_ROLE_SLAVE;
98a0b845
JH
1183}
1184
f3a73d97
JH
1185struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1186 u8 addr_type, u8 role)
75d262c2 1187{
c9839a11 1188 struct smp_ltk *k;
75d262c2 1189
970d0f1b
JH
1190 rcu_read_lock();
1191 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
5378bc56
JH
1192 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1193 continue;
1194
923e2414 1195 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
970d0f1b 1196 rcu_read_unlock();
600a8749
AM
1197
1198 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1199 k->val)) {
1200 bt_dev_warn_ratelimited(hdev,
1201 "LTK blocked for %pMR",
1202 &k->bdaddr);
1203 return NULL;
1204 }
1205
75d262c2 1206 return k;
970d0f1b
JH
1207 }
1208 }
1209 rcu_read_unlock();
75d262c2
VCG
1210
1211 return NULL;
1212}
75d262c2 1213
970c4e46
JH
1214struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1215{
600a8749 1216 struct smp_irk *irk_to_return = NULL;
970c4e46
JH
1217 struct smp_irk *irk;
1218
adae20cb
JH
1219 rcu_read_lock();
1220 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1221 if (!bacmp(&irk->rpa, rpa)) {
600a8749
AM
1222 irk_to_return = irk;
1223 goto done;
adae20cb 1224 }
970c4e46
JH
1225 }
1226
adae20cb 1227 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 1228 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 1229 bacpy(&irk->rpa, rpa);
600a8749
AM
1230 irk_to_return = irk;
1231 goto done;
970c4e46
JH
1232 }
1233 }
600a8749
AM
1234
1235done:
1236 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1237 irk_to_return->val)) {
1238 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1239 &irk_to_return->bdaddr);
1240 irk_to_return = NULL;
1241 }
1242
adae20cb 1243 rcu_read_unlock();
970c4e46 1244
600a8749 1245 return irk_to_return;
970c4e46
JH
1246}
1247
1248struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1249 u8 addr_type)
1250{
600a8749 1251 struct smp_irk *irk_to_return = NULL;
970c4e46
JH
1252 struct smp_irk *irk;
1253
6cfc9988
JH
1254 /* Identity Address must be public or static random */
1255 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1256 return NULL;
1257
adae20cb
JH
1258 rcu_read_lock();
1259 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 1260 if (addr_type == irk->addr_type &&
adae20cb 1261 bacmp(bdaddr, &irk->bdaddr) == 0) {
600a8749
AM
1262 irk_to_return = irk;
1263 goto done;
adae20cb 1264 }
970c4e46 1265 }
600a8749
AM
1266
1267done:
1268
1269 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1270 irk_to_return->val)) {
1271 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1272 &irk_to_return->bdaddr);
1273 irk_to_return = NULL;
1274 }
1275
adae20cb 1276 rcu_read_unlock();
970c4e46 1277
600a8749 1278 return irk_to_return;
970c4e46
JH
1279}
1280
567fa2aa 1281struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
1282 bdaddr_t *bdaddr, u8 *val, u8 type,
1283 u8 pin_len, bool *persistent)
55ed8ca1
JH
1284{
1285 struct link_key *key, *old_key;
745c0ce3 1286 u8 old_key_type;
55ed8ca1
JH
1287
1288 old_key = hci_find_link_key(hdev, bdaddr);
1289 if (old_key) {
1290 old_key_type = old_key->type;
1291 key = old_key;
1292 } else {
12adcf3a 1293 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 1294 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 1295 if (!key)
567fa2aa 1296 return NULL;
0378b597 1297 list_add_rcu(&key->list, &hdev->link_keys);
55ed8ca1
JH
1298 }
1299
6ed93dc6 1300 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 1301
d25e28ab
JH
1302 /* Some buggy controller combinations generate a changed
1303 * combination key for legacy pairing even when there's no
1304 * previous key */
1305 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1306 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1307 type = HCI_LK_COMBINATION;
655fe6ec
JH
1308 if (conn)
1309 conn->key_type = type;
1310 }
d25e28ab 1311
55ed8ca1 1312 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1313 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1314 key->pin_len = pin_len;
1315
b6020ba0 1316 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1317 key->type = old_key_type;
4748fed2
JH
1318 else
1319 key->type = type;
1320
7652ff6a
JH
1321 if (persistent)
1322 *persistent = hci_persistent_key(hdev, conn, type,
1323 old_key_type);
4df378a1 1324
567fa2aa 1325 return key;
55ed8ca1
JH
1326}
1327
ca9142b8 1328struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 1329 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 1330 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 1331{
c9839a11 1332 struct smp_ltk *key, *old_key;
e804d25d 1333 u8 role = ltk_role(type);
75d262c2 1334
f3a73d97 1335 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
c9839a11 1336 if (old_key)
75d262c2 1337 key = old_key;
c9839a11 1338 else {
0a14ab41 1339 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 1340 if (!key)
ca9142b8 1341 return NULL;
970d0f1b 1342 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1343 }
1344
75d262c2 1345 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1346 key->bdaddr_type = addr_type;
1347 memcpy(key->val, tk, sizeof(key->val));
1348 key->authenticated = authenticated;
1349 key->ediv = ediv;
fe39c7b2 1350 key->rand = rand;
c9839a11
VCG
1351 key->enc_size = enc_size;
1352 key->type = type;
75d262c2 1353
ca9142b8 1354 return key;
75d262c2
VCG
1355}
1356
ca9142b8
JH
1357struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1358 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
1359{
1360 struct smp_irk *irk;
1361
1362 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1363 if (!irk) {
1364 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1365 if (!irk)
ca9142b8 1366 return NULL;
970c4e46
JH
1367
1368 bacpy(&irk->bdaddr, bdaddr);
1369 irk->addr_type = addr_type;
1370
adae20cb 1371 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
1372 }
1373
1374 memcpy(irk->val, val, 16);
1375 bacpy(&irk->rpa, rpa);
1376
ca9142b8 1377 return irk;
970c4e46
JH
1378}
1379
55ed8ca1
JH
1380int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1381{
1382 struct link_key *key;
1383
1384 key = hci_find_link_key(hdev, bdaddr);
1385 if (!key)
1386 return -ENOENT;
1387
6ed93dc6 1388 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 1389
0378b597
JH
1390 list_del_rcu(&key->list);
1391 kfree_rcu(key, rcu);
55ed8ca1
JH
1392
1393 return 0;
1394}
1395
e0b2b27e 1396int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 1397{
970d0f1b 1398 struct smp_ltk *k;
c51ffa0b 1399 int removed = 0;
b899efaf 1400
970d0f1b 1401 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 1402 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
1403 continue;
1404
6ed93dc6 1405 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 1406
970d0f1b
JH
1407 list_del_rcu(&k->list);
1408 kfree_rcu(k, rcu);
c51ffa0b 1409 removed++;
b899efaf
VCG
1410 }
1411
c51ffa0b 1412 return removed ? 0 : -ENOENT;
b899efaf
VCG
1413}
1414
a7ec7338
JH
1415void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1416{
adae20cb 1417 struct smp_irk *k;
a7ec7338 1418
adae20cb 1419 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
1420 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1421 continue;
1422
1423 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1424
adae20cb
JH
1425 list_del_rcu(&k->list);
1426 kfree_rcu(k, rcu);
a7ec7338
JH
1427 }
1428}
1429
55e76b38
JH
1430bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1431{
1432 struct smp_ltk *k;
4ba9faf3 1433 struct smp_irk *irk;
55e76b38
JH
1434 u8 addr_type;
1435
1436 if (type == BDADDR_BREDR) {
1437 if (hci_find_link_key(hdev, bdaddr))
1438 return true;
1439 return false;
1440 }
1441
1442 /* Convert to HCI addr type which struct smp_ltk uses */
1443 if (type == BDADDR_LE_PUBLIC)
1444 addr_type = ADDR_LE_DEV_PUBLIC;
1445 else
1446 addr_type = ADDR_LE_DEV_RANDOM;
1447
4ba9faf3
JH
1448 irk = hci_get_irk(hdev, bdaddr, addr_type);
1449 if (irk) {
1450 bdaddr = &irk->bdaddr;
1451 addr_type = irk->addr_type;
1452 }
1453
55e76b38
JH
1454 rcu_read_lock();
1455 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
87c8b28d
JH
1456 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1457 rcu_read_unlock();
55e76b38 1458 return true;
87c8b28d 1459 }
55e76b38
JH
1460 }
1461 rcu_read_unlock();
1462
1463 return false;
1464}
1465
6bd32326 1466/* HCI command timer function */
65cc2b49 1467static void hci_cmd_timeout(struct work_struct *work)
6bd32326 1468{
65cc2b49
MH
1469 struct hci_dev *hdev = container_of(work, struct hci_dev,
1470 cmd_timer.work);
6bd32326 1471
bda4f23a
AE
1472 if (hdev->sent_cmd) {
1473 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1474 u16 opcode = __le16_to_cpu(sent->opcode);
1475
2064ee33 1476 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
bda4f23a 1477 } else {
2064ee33 1478 bt_dev_err(hdev, "command tx timeout");
bda4f23a
AE
1479 }
1480
e2bef384
RJ
1481 if (hdev->cmd_timeout)
1482 hdev->cmd_timeout(hdev);
1483
6bd32326 1484 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1485 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1486}
1487
de75cd0d
MM
1488/* HCI ncmd timer function */
1489static void hci_ncmd_timeout(struct work_struct *work)
1490{
1491 struct hci_dev *hdev = container_of(work, struct hci_dev,
1492 ncmd_timer.work);
1493
1494 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1495
1496 /* During HCI_INIT phase no events can be injected if the ncmd timer
1497 * triggers since the procedure has its own timeout handling.
1498 */
1499 if (test_bit(HCI_INIT, &hdev->flags))
1500 return;
1501
1502 /* This is an irrecoverable state, inject hardware error event */
1503 hci_reset_dev(hdev);
1504}
1505
2763eda6 1506struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 1507 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
1508{
1509 struct oob_data *data;
1510
6928a924
JH
1511 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1512 if (bacmp(bdaddr, &data->bdaddr) != 0)
1513 continue;
1514 if (data->bdaddr_type != bdaddr_type)
1515 continue;
1516 return data;
1517 }
2763eda6
SJ
1518
1519 return NULL;
1520}
1521
6928a924
JH
1522int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1523 u8 bdaddr_type)
2763eda6
SJ
1524{
1525 struct oob_data *data;
1526
6928a924 1527 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6
SJ
1528 if (!data)
1529 return -ENOENT;
1530
6928a924 1531 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2763eda6
SJ
1532
1533 list_del(&data->list);
1534 kfree(data);
1535
1536 return 0;
1537}
1538
35f7498a 1539void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
1540{
1541 struct oob_data *data, *n;
1542
1543 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1544 list_del(&data->list);
1545 kfree(data);
1546 }
2763eda6
SJ
1547}
1548
0798872e 1549int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
6928a924 1550 u8 bdaddr_type, u8 *hash192, u8 *rand192,
81328d5c 1551 u8 *hash256, u8 *rand256)
2763eda6
SJ
1552{
1553 struct oob_data *data;
1554
6928a924 1555 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6 1556 if (!data) {
0a14ab41 1557 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
1558 if (!data)
1559 return -ENOMEM;
1560
1561 bacpy(&data->bdaddr, bdaddr);
6928a924 1562 data->bdaddr_type = bdaddr_type;
2763eda6
SJ
1563 list_add(&data->list, &hdev->remote_oob_data);
1564 }
1565
81328d5c
JH
1566 if (hash192 && rand192) {
1567 memcpy(data->hash192, hash192, sizeof(data->hash192));
1568 memcpy(data->rand192, rand192, sizeof(data->rand192));
f7697b16
MH
1569 if (hash256 && rand256)
1570 data->present = 0x03;
81328d5c
JH
1571 } else {
1572 memset(data->hash192, 0, sizeof(data->hash192));
1573 memset(data->rand192, 0, sizeof(data->rand192));
f7697b16
MH
1574 if (hash256 && rand256)
1575 data->present = 0x02;
1576 else
1577 data->present = 0x00;
0798872e
MH
1578 }
1579
81328d5c
JH
1580 if (hash256 && rand256) {
1581 memcpy(data->hash256, hash256, sizeof(data->hash256));
1582 memcpy(data->rand256, rand256, sizeof(data->rand256));
1583 } else {
1584 memset(data->hash256, 0, sizeof(data->hash256));
1585 memset(data->rand256, 0, sizeof(data->rand256));
f7697b16
MH
1586 if (hash192 && rand192)
1587 data->present = 0x01;
81328d5c 1588 }
0798872e 1589
6ed93dc6 1590 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
1591
1592 return 0;
1593}
1594
d2609b34
FG
1595/* This function requires the caller holds hdev->lock */
1596struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1597{
1598 struct adv_info *adv_instance;
1599
1600 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1601 if (adv_instance->instance == instance)
1602 return adv_instance;
1603 }
1604
1605 return NULL;
1606}
1607
1608/* This function requires the caller holds hdev->lock */
74b93e9f
PK
1609struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1610{
d2609b34
FG
1611 struct adv_info *cur_instance;
1612
1613 cur_instance = hci_find_adv_instance(hdev, instance);
1614 if (!cur_instance)
1615 return NULL;
1616
1617 if (cur_instance == list_last_entry(&hdev->adv_instances,
1618 struct adv_info, list))
1619 return list_first_entry(&hdev->adv_instances,
1620 struct adv_info, list);
1621 else
1622 return list_next_entry(cur_instance, list);
1623}
1624
1625/* This function requires the caller holds hdev->lock */
1626int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1627{
1628 struct adv_info *adv_instance;
1629
1630 adv_instance = hci_find_adv_instance(hdev, instance);
1631 if (!adv_instance)
1632 return -ENOENT;
1633
1634 BT_DBG("%s removing %dMR", hdev->name, instance);
1635
cab054ab
JH
1636 if (hdev->cur_adv_instance == instance) {
1637 if (hdev->adv_instance_timeout) {
1638 cancel_delayed_work(&hdev->adv_instance_expire);
1639 hdev->adv_instance_timeout = 0;
1640 }
1641 hdev->cur_adv_instance = 0x00;
5d900e46
FG
1642 }
1643
a73c046a
JK
1644 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1645
d2609b34
FG
1646 list_del(&adv_instance->list);
1647 kfree(adv_instance);
1648
1649 hdev->adv_instance_cnt--;
1650
1651 return 0;
1652}
1653
a73c046a
JK
1654void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1655{
1656 struct adv_info *adv_instance, *n;
1657
1658 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1659 adv_instance->rpa_expired = rpa_expired;
1660}
1661
d2609b34
FG
1662/* This function requires the caller holds hdev->lock */
1663void hci_adv_instances_clear(struct hci_dev *hdev)
1664{
1665 struct adv_info *adv_instance, *n;
1666
5d900e46
FG
1667 if (hdev->adv_instance_timeout) {
1668 cancel_delayed_work(&hdev->adv_instance_expire);
1669 hdev->adv_instance_timeout = 0;
1670 }
1671
d2609b34 1672 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
a73c046a 1673 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
d2609b34
FG
1674 list_del(&adv_instance->list);
1675 kfree(adv_instance);
1676 }
1677
1678 hdev->adv_instance_cnt = 0;
cab054ab 1679 hdev->cur_adv_instance = 0x00;
d2609b34
FG
1680}
1681
a73c046a
JK
1682static void adv_instance_rpa_expired(struct work_struct *work)
1683{
1684 struct adv_info *adv_instance = container_of(work, struct adv_info,
1685 rpa_expired_cb.work);
1686
1687 BT_DBG("");
1688
1689 adv_instance->rpa_expired = true;
1690}
1691
d2609b34
FG
1692/* This function requires the caller holds hdev->lock */
1693int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
1694 u16 adv_data_len, u8 *adv_data,
1695 u16 scan_rsp_len, u8 *scan_rsp_data,
9bf9f4b6
DW
1696 u16 timeout, u16 duration, s8 tx_power,
1697 u32 min_interval, u32 max_interval)
d2609b34
FG
1698{
1699 struct adv_info *adv_instance;
1700
1701 adv_instance = hci_find_adv_instance(hdev, instance);
1702 if (adv_instance) {
1703 memset(adv_instance->adv_data, 0,
1704 sizeof(adv_instance->adv_data));
1705 memset(adv_instance->scan_rsp_data, 0,
1706 sizeof(adv_instance->scan_rsp_data));
1707 } else {
1d0fac2c 1708 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
87597482 1709 instance < 1 || instance > hdev->le_num_of_adv_sets)
d2609b34
FG
1710 return -EOVERFLOW;
1711
39ecfad6 1712 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
d2609b34
FG
1713 if (!adv_instance)
1714 return -ENOMEM;
1715
fffd38bc 1716 adv_instance->pending = true;
d2609b34
FG
1717 adv_instance->instance = instance;
1718 list_add(&adv_instance->list, &hdev->adv_instances);
1719 hdev->adv_instance_cnt++;
1720 }
1721
1722 adv_instance->flags = flags;
1723 adv_instance->adv_data_len = adv_data_len;
1724 adv_instance->scan_rsp_len = scan_rsp_len;
9bf9f4b6
DW
1725 adv_instance->min_interval = min_interval;
1726 adv_instance->max_interval = max_interval;
1727 adv_instance->tx_power = tx_power;
d2609b34
FG
1728
1729 if (adv_data_len)
1730 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
1731
1732 if (scan_rsp_len)
1733 memcpy(adv_instance->scan_rsp_data,
1734 scan_rsp_data, scan_rsp_len);
1735
1736 adv_instance->timeout = timeout;
5d900e46 1737 adv_instance->remaining_time = timeout;
d2609b34
FG
1738
1739 if (duration == 0)
10873f99 1740 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
d2609b34
FG
1741 else
1742 adv_instance->duration = duration;
1743
a73c046a
JK
1744 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
1745 adv_instance_rpa_expired);
1746
d2609b34
FG
1747 BT_DBG("%s for %dMR", hdev->name, instance);
1748
1749 return 0;
1750}
1751
31aab5c2
DW
1752/* This function requires the caller holds hdev->lock */
1753int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1754 u16 adv_data_len, u8 *adv_data,
1755 u16 scan_rsp_len, u8 *scan_rsp_data)
1756{
1757 struct adv_info *adv_instance;
1758
1759 adv_instance = hci_find_adv_instance(hdev, instance);
1760
1761 /* If advertisement doesn't exist, we can't modify its data */
1762 if (!adv_instance)
1763 return -ENOENT;
1764
1765 if (adv_data_len) {
1766 memset(adv_instance->adv_data, 0,
1767 sizeof(adv_instance->adv_data));
1768 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
1769 adv_instance->adv_data_len = adv_data_len;
1770 }
1771
1772 if (scan_rsp_len) {
1773 memset(adv_instance->scan_rsp_data, 0,
1774 sizeof(adv_instance->scan_rsp_data));
1775 memcpy(adv_instance->scan_rsp_data,
1776 scan_rsp_data, scan_rsp_len);
1777 adv_instance->scan_rsp_len = scan_rsp_len;
1778 }
1779
1780 return 0;
1781}
1782
01ce70b0
LAD
1783/* This function requires the caller holds hdev->lock */
1784u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1785{
1786 u32 flags;
1787 struct adv_info *adv;
1788
1789 if (instance == 0x00) {
1790 /* Instance 0 always manages the "Tx Power" and "Flags"
1791 * fields
1792 */
1793 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1794
1795 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1796 * corresponds to the "connectable" instance flag.
1797 */
1798 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1799 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1800
1801 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1802 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1803 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1804 flags |= MGMT_ADV_FLAG_DISCOV;
1805
1806 return flags;
1807 }
1808
1809 adv = hci_find_adv_instance(hdev, instance);
1810
1811 /* Return 0 when we got an invalid instance identifier. */
1812 if (!adv)
1813 return 0;
1814
1815 return adv->flags;
1816}
1817
1818bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1819{
1820 struct adv_info *adv;
1821
1822 /* Instance 0x00 always set local name */
1823 if (instance == 0x00)
1824 return true;
1825
1826 adv = hci_find_adv_instance(hdev, instance);
1827 if (!adv)
1828 return false;
1829
1830 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1831 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1832 return true;
1833
1834 return adv->scan_rsp_len ? true : false;
1835}
1836
e5e1e7fd
MC
1837/* This function requires the caller holds hdev->lock */
1838void hci_adv_monitors_clear(struct hci_dev *hdev)
1839{
b139553d
MC
1840 struct adv_monitor *monitor;
1841 int handle;
1842
1843 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
66bd095a 1844 hci_free_adv_monitor(hdev, monitor);
b139553d 1845
e5e1e7fd
MC
1846 idr_destroy(&hdev->adv_monitors_idr);
1847}
1848
66bd095a
AP
1849/* Frees the monitor structure and do some bookkeepings.
1850 * This function requires the caller holds hdev->lock.
1851 */
1852void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
b139553d
MC
1853{
1854 struct adv_pattern *pattern;
1855 struct adv_pattern *tmp;
1856
1857 if (!monitor)
1858 return;
1859
66bd095a
AP
1860 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1861 list_del(&pattern->list);
b139553d 1862 kfree(pattern);
66bd095a
AP
1863 }
1864
1865 if (monitor->handle)
1866 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1867
1868 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1869 hdev->adv_monitors_cnt--;
1870 mgmt_adv_monitor_removed(hdev, monitor->handle);
1871 }
b139553d
MC
1872
1873 kfree(monitor);
1874}
1875
a2a4dedf
AP
1876int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
1877{
1878 return mgmt_add_adv_patterns_monitor_complete(hdev, status);
1879}
1880
66bd095a
AP
1881int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
1882{
1883 return mgmt_remove_adv_monitor_complete(hdev, status);
1884}
1885
a2a4dedf
AP
1886/* Assigns handle to a monitor, and if offloading is supported and power is on,
1887 * also attempts to forward the request to the controller.
1888 * Returns true if request is forwarded (result is pending), false otherwise.
1889 * This function requires the caller holds hdev->lock.
1890 */
1891bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
1892 int *err)
b139553d
MC
1893{
1894 int min, max, handle;
1895
a2a4dedf
AP
1896 *err = 0;
1897
1898 if (!monitor) {
1899 *err = -EINVAL;
1900 return false;
1901 }
b139553d
MC
1902
1903 min = HCI_MIN_ADV_MONITOR_HANDLE;
1904 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1905 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1906 GFP_KERNEL);
a2a4dedf
AP
1907 if (handle < 0) {
1908 *err = handle;
1909 return false;
1910 }
b139553d 1911
b139553d 1912 monitor->handle = handle;
8208f5a9 1913
a2a4dedf
AP
1914 if (!hdev_is_powered(hdev))
1915 return false;
8208f5a9 1916
a2a4dedf
AP
1917 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1918 case HCI_ADV_MONITOR_EXT_NONE:
5bee2fd6 1919 hci_update_passive_scan(hdev);
a2a4dedf
AP
1920 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
1921 /* Message was not forwarded to controller - not an error */
1922 return false;
1923 case HCI_ADV_MONITOR_EXT_MSFT:
1924 *err = msft_add_monitor_pattern(hdev, monitor);
1925 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
1926 *err);
1927 break;
1928 }
1929
1930 return (*err == 0);
b139553d
MC
1931}
1932
66bd095a
AP
1933/* Attempts to tell the controller and free the monitor. If somehow the
1934 * controller doesn't have a corresponding handle, remove anyway.
1935 * Returns true if request is forwarded (result is pending), false otherwise.
1936 * This function requires the caller holds hdev->lock.
1937 */
1938static bool hci_remove_adv_monitor(struct hci_dev *hdev,
1939 struct adv_monitor *monitor,
1940 u16 handle, int *err)
bd2fbc6c 1941{
66bd095a 1942 *err = 0;
bd2fbc6c 1943
66bd095a
AP
1944 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1945 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1946 goto free_monitor;
1947 case HCI_ADV_MONITOR_EXT_MSFT:
1948 *err = msft_remove_monitor(hdev, monitor, handle);
1949 break;
1950 }
bd2fbc6c 1951
66bd095a
AP
1952 /* In case no matching handle registered, just free the monitor */
1953 if (*err == -ENOENT)
1954 goto free_monitor;
1955
1956 return (*err == 0);
1957
1958free_monitor:
1959 if (*err == -ENOENT)
1960 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1961 monitor->handle);
1962 hci_free_adv_monitor(hdev, monitor);
1963
1964 *err = 0;
1965 return false;
bd2fbc6c
MC
1966}
1967
66bd095a
AP
1968/* Returns true if request is forwarded (result is pending), false otherwise.
1969 * This function requires the caller holds hdev->lock.
1970 */
1971bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
1972{
1973 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1974 bool pending;
1975
1976 if (!monitor) {
1977 *err = -EINVAL;
1978 return false;
1979 }
1980
1981 pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
1982 if (!*err && !pending)
5bee2fd6 1983 hci_update_passive_scan(hdev);
66bd095a
AP
1984
1985 bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
1986 hdev->name, handle, *err, pending ? "" : "not ");
1987
1988 return pending;
1989}
1990
1991/* Returns true if request is forwarded (result is pending), false otherwise.
1992 * This function requires the caller holds hdev->lock.
1993 */
1994bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
bd2fbc6c
MC
1995{
1996 struct adv_monitor *monitor;
66bd095a
AP
1997 int idr_next_id = 0;
1998 bool pending = false;
1999 bool update = false;
bd2fbc6c 2000
66bd095a
AP
2001 *err = 0;
2002
2003 while (!*err && !pending) {
2004 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
bd2fbc6c 2005 if (!monitor)
66bd095a 2006 break;
bd2fbc6c 2007
66bd095a
AP
2008 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
2009
2010 if (!*err && !pending)
2011 update = true;
bd2fbc6c
MC
2012 }
2013
66bd095a 2014 if (update)
5bee2fd6 2015 hci_update_passive_scan(hdev);
8208f5a9 2016
66bd095a
AP
2017 bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
2018 hdev->name, *err, pending ? "" : "not ");
2019
2020 return pending;
bd2fbc6c
MC
2021}
2022
8208f5a9
MC
2023/* This function requires the caller holds hdev->lock */
2024bool hci_is_adv_monitoring(struct hci_dev *hdev)
2025{
2026 return !idr_is_empty(&hdev->adv_monitors_idr);
2027}
2028
a2a4dedf
AP
2029int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2030{
2031 if (msft_monitor_supported(hdev))
2032 return HCI_ADV_MONITOR_EXT_MSFT;
2033
2034 return HCI_ADV_MONITOR_EXT_NONE;
2035}
2036
dcc36c16 2037struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 2038 bdaddr_t *bdaddr, u8 type)
b2a66aad 2039{
8035ded4 2040 struct bdaddr_list *b;
b2a66aad 2041
dcc36c16 2042 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 2043 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2044 return b;
b9ee0a78 2045 }
b2a66aad
AJ
2046
2047 return NULL;
2048}
2049
b950aa88
AN
2050struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2051 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2052 u8 type)
2053{
2054 struct bdaddr_list_with_irk *b;
2055
2056 list_for_each_entry(b, bdaddr_list, list) {
2057 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2058 return b;
2059 }
2060
2061 return NULL;
2062}
2063
8baaa403
APS
2064struct bdaddr_list_with_flags *
2065hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2066 bdaddr_t *bdaddr, u8 type)
2067{
2068 struct bdaddr_list_with_flags *b;
2069
2070 list_for_each_entry(b, bdaddr_list, list) {
2071 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2072 return b;
2073 }
2074
2075 return NULL;
2076}
2077
dcc36c16 2078void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad 2079{
7eb7404f 2080 struct bdaddr_list *b, *n;
b2a66aad 2081
7eb7404f
GT
2082 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2083 list_del(&b->list);
b2a66aad
AJ
2084 kfree(b);
2085 }
b2a66aad
AJ
2086}
2087
dcc36c16 2088int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2089{
2090 struct bdaddr_list *entry;
b2a66aad 2091
b9ee0a78 2092 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2093 return -EBADF;
2094
dcc36c16 2095 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 2096 return -EEXIST;
b2a66aad 2097
27f70f3e 2098 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
2099 if (!entry)
2100 return -ENOMEM;
b2a66aad
AJ
2101
2102 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2103 entry->bdaddr_type = type;
b2a66aad 2104
dcc36c16 2105 list_add(&entry->list, list);
b2a66aad 2106
2a8357f2 2107 return 0;
b2a66aad
AJ
2108}
2109
b950aa88
AN
2110int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2111 u8 type, u8 *peer_irk, u8 *local_irk)
2112{
2113 struct bdaddr_list_with_irk *entry;
2114
2115 if (!bacmp(bdaddr, BDADDR_ANY))
2116 return -EBADF;
2117
2118 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2119 return -EEXIST;
2120
2121 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2122 if (!entry)
2123 return -ENOMEM;
2124
2125 bacpy(&entry->bdaddr, bdaddr);
2126 entry->bdaddr_type = type;
2127
2128 if (peer_irk)
2129 memcpy(entry->peer_irk, peer_irk, 16);
2130
2131 if (local_irk)
2132 memcpy(entry->local_irk, local_irk, 16);
2133
2134 list_add(&entry->list, list);
2135
2136 return 0;
2137}
2138
8baaa403
APS
2139int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2140 u8 type, u32 flags)
2141{
2142 struct bdaddr_list_with_flags *entry;
2143
2144 if (!bacmp(bdaddr, BDADDR_ANY))
2145 return -EBADF;
2146
2147 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2148 return -EEXIST;
2149
2150 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2151 if (!entry)
2152 return -ENOMEM;
2153
2154 bacpy(&entry->bdaddr, bdaddr);
2155 entry->bdaddr_type = type;
fe92ee64 2156 bitmap_from_u64(entry->flags, flags);
8baaa403
APS
2157
2158 list_add(&entry->list, list);
2159
2160 return 0;
2161}
2162
dcc36c16 2163int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2164{
2165 struct bdaddr_list *entry;
b2a66aad 2166
35f7498a 2167 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 2168 hci_bdaddr_list_clear(list);
35f7498a
JH
2169 return 0;
2170 }
b2a66aad 2171
dcc36c16 2172 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
2173 if (!entry)
2174 return -ENOENT;
2175
2176 list_del(&entry->list);
2177 kfree(entry);
2178
2179 return 0;
2180}
2181
b950aa88
AN
2182int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2183 u8 type)
2184{
2185 struct bdaddr_list_with_irk *entry;
2186
2187 if (!bacmp(bdaddr, BDADDR_ANY)) {
2188 hci_bdaddr_list_clear(list);
2189 return 0;
2190 }
2191
2192 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2193 if (!entry)
2194 return -ENOENT;
2195
2196 list_del(&entry->list);
2197 kfree(entry);
2198
2199 return 0;
2200}
2201
8baaa403
APS
2202int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2203 u8 type)
2204{
2205 struct bdaddr_list_with_flags *entry;
2206
2207 if (!bacmp(bdaddr, BDADDR_ANY)) {
2208 hci_bdaddr_list_clear(list);
2209 return 0;
2210 }
2211
2212 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2213 if (!entry)
2214 return -ENOENT;
2215
2216 list_del(&entry->list);
2217 kfree(entry);
2218
2219 return 0;
2220}
2221
15819a70
AG
2222/* This function requires the caller holds hdev->lock */
2223struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2224 bdaddr_t *addr, u8 addr_type)
2225{
2226 struct hci_conn_params *params;
2227
2228 list_for_each_entry(params, &hdev->le_conn_params, list) {
2229 if (bacmp(&params->addr, addr) == 0 &&
2230 params->addr_type == addr_type) {
2231 return params;
2232 }
2233 }
2234
2235 return NULL;
2236}
2237
4b10966f 2238/* This function requires the caller holds hdev->lock */
501f8827
JH
2239struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2240 bdaddr_t *addr, u8 addr_type)
a9b0a04c 2241{
912b42ef 2242 struct hci_conn_params *param;
a9b0a04c 2243
501f8827 2244 list_for_each_entry(param, list, action) {
912b42ef
JH
2245 if (bacmp(&param->addr, addr) == 0 &&
2246 param->addr_type == addr_type)
2247 return param;
4b10966f
MH
2248 }
2249
2250 return NULL;
a9b0a04c
AG
2251}
2252
/* This function requires the caller holds hdev->lock.
 *
 * Return the existing connection parameters for @addr/@addr_type, or
 * allocate a fresh entry initialized from the controller defaults and
 * link it onto hdev->le_conn_params. Returns NULL on allocation
 * failure.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* Idempotent: reuse an existing entry for this address */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		bt_dev_err(hdev, "out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	/* Not yet on any pend_le_* list; self-linked so list_del is safe */
	INIT_LIST_HEAD(&params->action);

	/* Seed per-connection values from the controller-wide defaults */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
2285
/* Unlink @params from all lists, release any held connection
 * references and free the entry.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		/* Drop both the usage and the object reference taken when
		 * the connection was attached to these params.
		 */
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}
2297
/* This function requires the caller holds hdev->lock.
 *
 * Delete the stored connection parameters for @addr/@addr_type, if
 * any, and re-evaluate passive scanning since the accept-list contents
 * may have changed.
 */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_passive_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
2313
/* This function requires the caller holds hdev->lock.
 *
 * Drop all connection parameter entries whose auto_connect policy is
 * HCI_AUTO_CONN_DISABLED, except those with a pending explicit
 * connect, which are downgraded to one-shot (EXPLICIT) instead.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
2337
/* This function requires the caller holds hdev->lock.
 *
 * Free every stored LE connection parameter entry, dropping any
 * connection references held through them.
 */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}
2348
a1f4c318
JH
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	/* Static random address wins when forced, when no public address
	 * exists, or when BR/EDR is disabled and a static address is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
2376
2f20216c
APS
2377static void hci_clear_wake_reason(struct hci_dev *hdev)
2378{
2379 hci_dev_lock(hdev);
2380
2381 hdev->wake_reason = 0;
2382 bacpy(&hdev->wake_addr, BDADDR_ANY);
2383 hdev->wake_addr_type = 0;
2384
2385 hci_dev_unlock(hdev);
2386}
2387
9952d90e
APS
/* PM notifier callback: route system suspend/resume transitions to the
 * HCI suspend/resume handlers. Failures are only logged; NOTIFY_DONE is
 * returned unconditionally so the PM transition is never blocked.
 */
static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	struct hci_dev *hdev =
		container_of(nb, struct hci_dev, suspend_notifier);
	int ret = 0;

	if (action == PM_SUSPEND_PREPARE)
		ret = hci_suspend_dev(hdev);
	else if (action == PM_POST_SUSPEND)
		ret = hci_resume_dev(hdev);

	if (ret)
		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
			   action, ret);

	return NOTIFY_DONE;
}
8731840a 2406
/* Alloc HCI device */
/* Allocate and zero-initialize a struct hci_dev plus @sizeof_priv bytes
 * of driver-private data, then seed every controller default, list,
 * work item and queue. Returns NULL on allocation failure. The caller
 * registers the device with hci_register_dev() and releases it with
 * hci_free_dev().
 */
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
{
	struct hci_dev *hdev;
	unsigned int alloc_size;

	alloc_size = sizeof(*hdev);
	if (sizeof_priv) {
		/* Fixme: May need ALIGN-ment? */
		alloc_size += sizeof_priv;
	}

	hdev = kzalloc(alloc_size, GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR packet types and link policy */
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->advmon_allowlist_duration = 300;
	hdev->advmon_no_filter_duration = 500;
	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults; scan/connection values are in 0.625 ms units as
	 * defined by the HCI commands that consume them.
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_scan_int_suspend = 0x0400;
	hdev->le_scan_window_suspend = 0x0012;
	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
	hdev->le_scan_int_adv_monitor = 0x0060;
	hdev->le_scan_window_adv_monitor = 0x0030;
	hdev->le_scan_int_connect = 0x0060;
	hdev->le_scan_window_connect = 0x0060;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
	hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	/* default 1.28 sec page scan */
	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
	hdev->def_page_scan_int = 0x0800;
	hdev->def_page_scan_window = 0x0012;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Every list must be initialized before registration so the
	 * clear helpers in hci_release_dev() are always safe.
	 */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->reject_list);
	INIT_LIST_HEAD(&hdev->accept_list);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_accept_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);

	INIT_LIST_HEAD(&hdev->local_codecs);
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	hci_cmd_sync_init(hdev);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev_priv);
9be0dab7
DH
2535
/* Free HCI device */
/* Drop the device reference taken at allocation; the memory itself is
 * reclaimed by the device release callback (hci_release_dev()).
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2543
1da177e4
LT
/* Register HCI device */
/* Allocate an index, create the workqueues, add the device to sysfs,
 * hook up rfkill/PM, publish it on hci_dev_list and kick off the
 * initial power-on. Returns the new index (>= 0) or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must at minimum provide open/close/send callbacks */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	/* rfkill setup failure is non-fatal; continue without rfkill */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
	 * callback.
	 */
	if (hdev->wakeup)
		set_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, hdev->conn_flags);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
		error = register_pm_notifier(&hdev->suspend_notifier);
		if (error)
			/* NOTE(review): this error path does not undo
			 * device_add(), the list insertion or the held
			 * reference above — verify against later upstream
			 * fixes before relying on it.
			 */
			goto err_wqueue;
	}

	queue_work(hdev->req_workqueue, &hdev->power_on);

	idr_init(&hdev->adv_monitors_idr);
	msft_register(hdev);

	return id;

err_wqueue:
	debugfs_remove_recursive(hdev->debugfs);
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2665
/* Unregister HCI device */
/* Tear down a registered controller: unpublish it, stop pending work,
 * close the device and remove it from sysfs. Memory release is
 * deferred to hci_release_dev() via the final hci_dev_put().
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Flag first so concurrent paths see the device going away */
	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_cmd_sync_clear(hdev);

	if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
		unregister_pm_notifier(&hdev->suspend_notifier);

	msft_unregister(hdev);

	hci_dev_do_close(hdev);

	/* Only announce removal to mgmt if userspace ever saw the index */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);
	/* Actual cleanup is deferred until hci_release_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
147e2d59 2712
58ce6d5b
TH
/* Release HCI device */
/* Final cleanup, called when the last reference to the device is
 * dropped: free debugfs, workqueues, every stored list/key/parameter,
 * return the index to the IDA and free the hci_dev itself.
 */
void hci_release_dev(struct hci_dev *hdev)
{
	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->reject_list);
	hci_bdaddr_list_clear(&hdev->accept_list);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_dev_unlock(hdev);

	ida_simple_remove(&hci_index_ida, hdev->id);
	kfree(hdev);
}
EXPORT_SYMBOL(hci_release_dev);
1da177e4
LT
2744
2745/* Suspend HCI device */
2746int hci_suspend_dev(struct hci_dev *hdev)
2747{
e1b77d68 2748 int ret;
e1b77d68
LAD
2749
2750 bt_dev_dbg(hdev, "");
2751
2752 /* Suspend should only act on when powered. */
2753 if (!hdev_is_powered(hdev) ||
2754 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2755 return 0;
2756
182ee45d
LAD
2757 /* If powering down don't attempt to suspend */
2758 if (mgmt_powering_down(hdev))
2759 return 0;
4539ca67 2760
182ee45d
LAD
2761 hci_req_sync_lock(hdev);
2762 ret = hci_suspend_sync(hdev);
2763 hci_req_sync_unlock(hdev);
e1b77d68
LAD
2764
2765 hci_clear_wake_reason(hdev);
182ee45d 2766 mgmt_suspending(hdev, hdev->suspend_state);
e1b77d68 2767
05fcd4c4 2768 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
e1b77d68 2769 return ret;
1da177e4
LT
2770}
2771EXPORT_SYMBOL(hci_suspend_dev);
2772
/* Resume HCI device */
/* Restore the controller from its suspend configuration. No-op
 * (returns 0) when the device is not powered, is being unregistered,
 * or is powering down. Reports the recorded wake reason to userspace.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Resume should only act on when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to resume */
	if (mgmt_powering_down(hdev))
		return 0;

	hci_req_sync_lock(hdev);
	ret = hci_resume_sync(hdev);
	hci_req_sync_unlock(hdev);

	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
		      hdev->wake_addr_type);

	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return ret;
}
EXPORT_SYMBOL(hci_resume_dev);
2800
75e0569f
MH
2801/* Reset HCI device */
2802int hci_reset_dev(struct hci_dev *hdev)
2803{
1e4b6e91 2804 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
75e0569f
MH
2805 struct sk_buff *skb;
2806
2807 skb = bt_skb_alloc(3, GFP_ATOMIC);
2808 if (!skb)
2809 return -ENOMEM;
2810
d79f34e3 2811 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
59ae1d12 2812 skb_put_data(skb, hw_err, 3);
75e0569f 2813
de75cd0d
MM
2814 bt_dev_err(hdev, "Injecting HCI hardware error event");
2815
75e0569f
MH
2816 /* Send Hardware Error to upper stack */
2817 return hci_recv_frame(hdev, skb);
2818}
2819EXPORT_SYMBOL(hci_reset_dev);
2820
/* Receive frame from HCI drivers */
/* Validate an inbound packet from a driver and queue it for the RX
 * worker. Consumes @skb in every case. Returns 0 on success, -ENXIO if
 * the device is neither up nor initializing, -EINVAL for an unknown
 * packet type.
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Only the four defined inbound packet types are accepted */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
2850
e875ff84
MH
/* Receive diagnostic message from HCI drivers */
/* Tag @skb as a diagnostic packet and queue it on the RX path without
 * the validity checks applied to regular frames. Always returns 0.
 */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
2866
5177a838
MH
/* Record a printf-formatted hardware info string on @hdev, replacing
 * any previous value. The string is freed in hci_release_dev().
 */
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);
2877
/* Record a printf-formatted firmware info string on @hdev, replacing
 * any previous value. The string is freed in hci_release_dev().
 */
void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
2888
1da177e4
LT
2889/* ---- Interface to upper protocols ---- */
2890
1da177e4
LT
/* Register an upper-protocol callback structure on the global
 * hci_cb_list. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2902
/* Remove a previously registered callback structure from the global
 * hci_cb_list. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2914
/* Hand one outbound packet to the driver, after mirroring it to the
 * monitor channel and (in promiscuous mode) to raw sockets. Consumes
 * @skb in every case. Returns 0 on success, -EINVAL when the device is
 * not running, or the driver's error code.
 */
static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
		return err;
	}

	return 0;
}
2950
/* Send HCI command */
/* Build a command skb for @opcode with @plen bytes of @param and queue
 * it on the command queue for the cmd worker. Returns 0 on success or
 * -ENOMEM if the skb cannot be built.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 2975
d6ee6ad7
LP
/* Send a vendor-specific (OGF 0x3f) command directly to the driver,
 * bypassing the command queue and without waiting for any completion
 * event. Standard commands are rejected with -EINVAL; returns -ENOMEM
 * if the skb cannot be built, 0 otherwise.
 */
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
3006
/* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last sent command if
 * its opcode matches @opcode, or NULL otherwise. The pointer aliases
 * hdev->sent_cmd and is only valid while that skb is held.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Opcode comparison is done in little-endian wire order */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
3024
3025/* Send ACL data */
3026static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3027{
3028 struct hci_acl_hdr *hdr;
3029 int len = skb->len;
3030
badff6d0
ACM
3031 skb_push(skb, HCI_ACL_HDR_SIZE);
3032 skb_reset_transport_header(skb);
9c70220b 3033 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3034 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3035 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3036}
3037
/* Add ACL headers to @skb (and each fragment on its frag_list) and
 * append everything to @queue. The first fragment keeps the caller's
 * @flags; continuation fragments are re-flagged ACL_CONT. Fragments of
 * one packet are queued atomically.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; the frag_list
	 * fragments are queued individually below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	/* AMP links use the per-channel handle, BR/EDR the connection one */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* All remaining fragments are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3099
3100void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3101{
ee22be7e 3102 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3103
f0e09510 3104 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3105
ee22be7e 3106 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3107
3eff45ea 3108 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3109}
1da177e4
LT
3110
3111/* Send SCO data */
0d861d8b 3112void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3113{
3114 struct hci_dev *hdev = conn->hdev;
3115 struct hci_sco_hdr hdr;
3116
3117 BT_DBG("%s len %d", hdev->name, skb->len);
3118
aca3192c 3119 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3120 hdr.dlen = skb->len;
3121
badff6d0
ACM
3122 skb_push(skb, HCI_SCO_HDR_SIZE);
3123 skb_reset_transport_header(skb);
9c70220b 3124 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3125
d79f34e3 3126 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
c78ae283 3127
1da177e4 3128 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3129 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3130}
1da177e4
LT
3131
3132/* ---- HCI TX task (outgoing data) ---- */
3133
3134/* HCI Connection scheduler */
6039aa73
GP
/* HCI connection scheduler: pick the connection of @type that has queued
 * data and the fewest unacknowledged packets (fairness), and compute its
 * TX quota in *quote from the controller's free buffer count.
 *
 * Returns the chosen connection, or NULL (with *quote = 0) when no
 * connection of this type is ready to send.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only established (or configuring) links may transmit */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type visited - stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Without a dedicated LE buffer pool (le_mtu == 0)
			 * LE traffic shares the ACL buffers.
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			bt_dev_err(hdev, "unknown link type %d", conn->type);
		}

		/* Split free buffers among contenders, but always allow at
		 * least one packet so progress is guaranteed.
		 */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3194
6039aa73 3195static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3196{
3197 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3198 struct hci_conn *c;
1da177e4 3199
2064ee33 3200 bt_dev_err(hdev, "link tx timeout");
1da177e4 3201
bf4c6325
GP
3202 rcu_read_lock();
3203
1da177e4 3204 /* Kill stalled connections */
bf4c6325 3205 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3206 if (c->type == type && c->sent) {
2064ee33
MH
3207 bt_dev_err(hdev, "killing stalled connection %pMR",
3208 &c->dst);
bed71748 3209 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3210 }
3211 }
bf4c6325
GP
3212
3213 rcu_read_unlock();
1da177e4
LT
3214}
3215
6039aa73
GP
/* HCI channel scheduler: among all channels on connections of @type, pick
 * the one whose head-of-queue skb has the highest priority, breaking ties
 * by the owning connection's lowest count of unacknowledged packets.
 * *quote receives the TX quota derived from the free buffer count.
 *
 * Returns the chosen channel, or NULL when nothing is ready to send.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Lower-priority channels lose to the current best */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Strictly higher priority found: restart the
			 * fairness bookkeeping at this new level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited - stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		/* AMP uses block-based flow control */
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* LE falls back to the ACL pool when le_mtu == 0 */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
	}

	/* Fair share of the free buffers, at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3297
02b20f0b
LAD
/* Priority aging for connections of @type: channels that transmitted in
 * the last round merely get their per-round counter reset; channels that
 * sent nothing get their head-of-queue skb promoted to HCI_PRIO_MAX - 1
 * so higher-priority traffic cannot starve them forever.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round: just clear the
			 * counter, no promotion needed.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at the highest schedulable priority */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited - stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3347
b71d385a
AE
3348static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3349{
3350 /* Calculate count of blocks used by this packet */
3351 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3352}
3353
/* Watchdog for ACL flow control: if the controller reports no free
 * buffers (@cnt == 0) and no ACL frame has been sent for longer than
 * HCI_ACL_TX_TIMEOUT, assume the link stalled and tear down the
 * offending ACL connections. Skipped for unconfigured controllers.
 *
 * NOTE(review): hci_sched_le() also calls this, so stalled LE traffic is
 * judged against hdev->acl_last_tx and killed as ACL_LINK - confirm that
 * is intended.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 3364
7fedd3bb
APS
3365/* Schedule SCO */
3366static void hci_sched_sco(struct hci_dev *hdev)
3367{
3368 struct hci_conn *conn;
3369 struct sk_buff *skb;
3370 int quote;
3371
3372 BT_DBG("%s", hdev->name);
3373
3374 if (!hci_conn_num(hdev, SCO_LINK))
3375 return;
3376
3377 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3378 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3379 BT_DBG("skb %p len %d", skb, skb->len);
3380 hci_send_frame(hdev, skb);
3381
3382 conn->sent++;
3383 if (conn->sent == ~0)
3384 conn->sent = 0;
3385 }
3386 }
3387}
3388
3389static void hci_sched_esco(struct hci_dev *hdev)
3390{
3391 struct hci_conn *conn;
3392 struct sk_buff *skb;
3393 int quote;
3394
3395 BT_DBG("%s", hdev->name);
3396
3397 if (!hci_conn_num(hdev, ESCO_LINK))
3398 return;
3399
3400 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3401 &quote))) {
3402 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3403 BT_DBG("skb %p len %d", skb, skb->len);
3404 hci_send_frame(hdev, skb);
3405
3406 conn->sent++;
3407 if (conn->sent == ~0)
3408 conn->sent = 0;
3409 }
3410 }
3411}
3412
/* Packet-based ACL scheduler: drain channel queues while the controller
 * has free ACL buffers (hdev->acl_cnt), respecting per-channel quotas and
 * skb priorities. SCO/eSCO queues are flushed after every ACL frame to
 * keep isochronous audio latency low.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a stalled link before queuing more traffic */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peeked skb is still the head; now take it */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Something was sent: age priorities for the next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3454
/* Block-based ACL scheduler (data block flow control): like
 * hci_sched_acl_pkt() but accounts buffer usage in blocks of
 * hdev->block_len bytes instead of whole packets.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry their data on AMP_LINK connections */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Packet no longer fits in the remaining blocks.
			 * NOTE(review): the dequeued skb is neither sent,
			 * requeued nor freed on this path - looks like it
			 * is dropped/leaked; confirm.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* One packet may consume several blocks */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3508
6039aa73 3509static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3510{
3511 BT_DBG("%s", hdev->name);
3512
bd1eb66b 3513 /* No ACL link over BR/EDR controller */
ca8bee5d 3514 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
bd1eb66b
AE
3515 return;
3516
3517 /* No AMP link over AMP controller */
3518 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3519 return;
3520
3521 switch (hdev->flow_ctl_mode) {
3522 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3523 hci_sched_acl_pkt(hdev);
3524 break;
3525
3526 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3527 hci_sched_acl_blk(hdev);
3528 break;
3529 }
3530}
3531
/* LE scheduler: mirrors hci_sched_acl_pkt() but draws from the dedicated
 * LE buffer pool when the controller has one (le_pkts != 0), falling back
 * to the shared ACL pool otherwise.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	/* NOTE(review): __check_timeout() judges against acl_last_tx and
	 * kills ACL_LINK connections even when called for LE - confirm.
	 */
	__check_timeout(hdev, cnt);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Write the unused budget back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: age priorities for the next round */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3581
/* TX work: runs all per-link-type schedulers and finally flushes raw
 * (unknown type) packets from hdev->raw_q. Scheduling is skipped while
 * the device is in HCI_USER_CHANNEL mode, where user space drives the
 * controller directly.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
3602
25985edc 3603/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3604
3605/* ACL data packet */
6039aa73 3606static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3607{
3608 struct hci_acl_hdr *hdr = (void *) skb->data;
3609 struct hci_conn *conn;
3610 __u16 handle, flags;
3611
3612 skb_pull(skb, HCI_ACL_HDR_SIZE);
3613
3614 handle = __le16_to_cpu(hdr->handle);
3615 flags = hci_flags(handle);
3616 handle = hci_handle(handle);
3617
f0e09510 3618 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 3619 handle, flags);
1da177e4
LT
3620
3621 hdev->stat.acl_rx++;
3622
3623 hci_dev_lock(hdev);
3624 conn = hci_conn_hash_lookup_handle(hdev, handle);
3625 hci_dev_unlock(hdev);
8e87d142 3626
1da177e4 3627 if (conn) {
65983fc7 3628 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 3629
1da177e4 3630 /* Send to upper protocol */
686ebf28
UF
3631 l2cap_recv_acldata(conn, skb, flags);
3632 return;
1da177e4 3633 } else {
2064ee33
MH
3634 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3635 handle);
1da177e4
LT
3636 }
3637
3638 kfree_skb(skb);
3639}
3640
3641/* SCO data packet */
6039aa73 3642static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3643{
3644 struct hci_sco_hdr *hdr = (void *) skb->data;
3645 struct hci_conn *conn;
debdedf2 3646 __u16 handle, flags;
1da177e4
LT
3647
3648 skb_pull(skb, HCI_SCO_HDR_SIZE);
3649
3650 handle = __le16_to_cpu(hdr->handle);
debdedf2
MH
3651 flags = hci_flags(handle);
3652 handle = hci_handle(handle);
1da177e4 3653
debdedf2
MH
3654 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3655 handle, flags);
1da177e4
LT
3656
3657 hdev->stat.sco_rx++;
3658
3659 hci_dev_lock(hdev);
3660 conn = hci_conn_hash_lookup_handle(hdev, handle);
3661 hci_dev_unlock(hdev);
3662
3663 if (conn) {
1da177e4 3664 /* Send to upper protocol */
00398e1d 3665 bt_cb(skb)->sco.pkt_status = flags & 0x03;
686ebf28
UF
3666 sco_recv_scodata(conn, skb);
3667 return;
1da177e4 3668 } else {
2064ee33
MH
3669 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
3670 handle);
1da177e4
LT
3671 }
3672
3673 kfree_skb(skb);
3674}
3675
9238f36a
JH
3676static bool hci_req_is_complete(struct hci_dev *hdev)
3677{
3678 struct sk_buff *skb;
3679
3680 skb = skb_peek(&hdev->cmd_q);
3681 if (!skb)
3682 return true;
3683
44d27137 3684 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
9238f36a
JH
3685}
3686
42c6b129
JH
3687static void hci_resend_last(struct hci_dev *hdev)
3688{
3689 struct hci_command_hdr *sent;
3690 struct sk_buff *skb;
3691 u16 opcode;
3692
3693 if (!hdev->sent_cmd)
3694 return;
3695
3696 sent = (void *) hdev->sent_cmd->data;
3697 opcode = __le16_to_cpu(sent->opcode);
3698 if (opcode == HCI_OP_RESET)
3699 return;
3700
3701 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3702 if (!skb)
3703 return;
3704
3705 skb_queue_head(&hdev->cmd_q, skb);
3706 queue_work(hdev->workqueue, &hdev->cmd_work);
3707}
3708
e6214487
JH
/* Match a command complete/status event against the currently running
 * HCI request and, when the request has finished, hand back its
 * completion callback through *req_complete / *req_complete_skb (at most
 * one is set; the caller invokes it). On failure (@status != 0) all
 * remaining queued commands belonging to the same request are discarded.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Start of the next request: put it back and stop */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Remember the last discarded command's callback so the
		 * caller can report the failure for this request.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
3773
/* RX work: drains hdev->rx_q, mirrors each frame to the monitor and (in
 * promiscuous mode) to raw sockets, then dispatches it to the event, ACL
 * or SCO handler. Data packets are dropped during HCI_INIT and, except
 * during setup, everything is dropped in HCI_USER_CHANNEL mode.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * the userspace has exclusive access to device.
		 * When device is HCI_INIT, we still need to process
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
3836
/* Command work: sends the next queued HCI command when the controller
 * has command credits (cmd_cnt). A clone of the command is kept in
 * hdev->sent_cmd for later completion matching, and the command timeout
 * timer is (re)armed unless a reset is in flight.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent command's reference copy */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			int res;
			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);

			/* On send failure, cancel the pending sync command */
			res = hci_send_frame(hdev, skb);
			if (res < 0)
				hci_cmd_sync_cancel(hdev, -res);

			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and retry from the work */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}