Bluetooth: Add support for Atheros [13d3:3362]
[linux-block.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4 42#include <linux/interrupt.h>
611b30f7 43#include <linux/rfkill.h>
6bd32326 44#include <linux/timer.h>
3a0259bb 45#include <linux/crypto.h>
1da177e4
LT
46#include <net/sock.h>
47
48#include <asm/system.h>
70f23020 49#include <linux/uaccess.h>
1da177e4
LT
50#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
ab81cbf9
JH
55#define AUTO_OFF_TIMEOUT 2000
56
b78752cc 57static void hci_rx_work(struct work_struct *work);
c347b765 58static void hci_cmd_work(struct work_struct *work);
3eff45ea 59static void hci_tx_work(struct work_struct *work);
1da177e4 60
1da177e4
LT
61/* HCI device list */
62LIST_HEAD(hci_dev_list);
63DEFINE_RWLOCK(hci_dev_list_lock);
64
65/* HCI callback list */
66LIST_HEAD(hci_cb_list);
67DEFINE_RWLOCK(hci_cb_list_lock);
68
1da177e4
LT
69/* ---- HCI notifications ---- */
70
/* Forward an HCI device event to the HCI socket layer listeners. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
75
76/* ---- HCI requests ---- */
77
23bb5763 78void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 79{
23bb5763
JH
80 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
81
a5040efa
JH
82 /* If this is the init phase check if the completed command matches
83 * the last init command, and if not just return.
84 */
75fb0e32
JH
85 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
86 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
87 struct sk_buff *skb;
88
89 /* Some CSR based controllers generate a spontaneous
90 * reset complete event during init and any pending
91 * command will never be completed. In such a case we
92 * need to resend whatever was the last sent
93 * command.
94 */
95
96 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
97 return;
98
99 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
100 if (skb) {
101 skb_queue_head(&hdev->cmd_q, skb);
102 queue_work(hdev->workqueue, &hdev->cmd_work);
103 }
104
23bb5763 105 return;
75fb0e32 106 }
1da177e4
LT
107
108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = result;
110 hdev->req_status = HCI_REQ_DONE;
111 wake_up_interruptible(&hdev->req_wait_q);
112 }
113}
114
115static void hci_req_cancel(struct hci_dev *hdev, int err)
116{
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
123 }
124}
125
126/* Execute request and wait for completion. */
8e87d142 127static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 128 unsigned long opt, __u32 timeout)
1da177e4
LT
129{
130 DECLARE_WAITQUEUE(wait, current);
131 int err = 0;
132
133 BT_DBG("%s start", hdev->name);
134
135 hdev->req_status = HCI_REQ_PEND;
136
137 add_wait_queue(&hdev->req_wait_q, &wait);
138 set_current_state(TASK_INTERRUPTIBLE);
139
140 req(hdev, opt);
141 schedule_timeout(timeout);
142
143 remove_wait_queue(&hdev->req_wait_q, &wait);
144
145 if (signal_pending(current))
146 return -EINTR;
147
148 switch (hdev->req_status) {
149 case HCI_REQ_DONE:
e175072f 150 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
151 break;
152
153 case HCI_REQ_CANCELED:
154 err = -hdev->req_result;
155 break;
156
157 default:
158 err = -ETIMEDOUT;
159 break;
3ff50b79 160 }
1da177e4 161
a5040efa 162 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
163
164 BT_DBG("%s end: err %d", hdev->name, err);
165
166 return err;
167}
168
169static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 170 unsigned long opt, __u32 timeout)
1da177e4
LT
171{
172 int ret;
173
7c6a329e
MH
174 if (!test_bit(HCI_UP, &hdev->flags))
175 return -ENETDOWN;
176
1da177e4
LT
177 /* Serialize all requests */
178 hci_req_lock(hdev);
179 ret = __hci_request(hdev, req, opt, timeout);
180 hci_req_unlock(hdev);
181
182 return ret;
183}
184
185static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186{
187 BT_DBG("%s %ld", hdev->name, opt);
188
189 /* Reset device */
f630cf0d 190 set_bit(HCI_RESET, &hdev->flags);
a9de9248 191 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
192}
193
e61ef499 194static void bredr_init(struct hci_dev *hdev)
1da177e4 195{
b0916ea0 196 struct hci_cp_delete_stored_link_key cp;
1ebb9252 197 __le16 param;
89f2783d 198 __u8 flt_type;
1da177e4 199
2455a3ea
AE
200 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201
1da177e4
LT
202 /* Mandatory initialization */
203
204 /* Reset */
f630cf0d 205 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
e61ef499
AE
206 set_bit(HCI_RESET, &hdev->flags);
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 208 }
1da177e4
LT
209
210 /* Read Local Supported Features */
a9de9248 211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 212
1143e5a6 213 /* Read Local Version */
a9de9248 214 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 215
1da177e4 216 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 217 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4 218
1da177e4 219 /* Read BD Address */
a9de9248
MH
220 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
221
222 /* Read Class of Device */
223 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
224
225 /* Read Local Name */
226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
227
228 /* Read Voice Setting */
a9de9248 229 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
230
231 /* Optional initialization */
232
233 /* Clear Event Filters */
89f2783d 234 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 235 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 236
1da177e4 237 /* Connection accept timeout ~20 secs */
aca3192c 238 param = cpu_to_le16(0x7d00);
a9de9248 239 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
240
241 bacpy(&cp.bdaddr, BDADDR_ANY);
242 cp.delete_all = 1;
243 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
244}
245
e61ef499
AE
246static void amp_init(struct hci_dev *hdev)
247{
2455a3ea
AE
248 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
249
e61ef499
AE
250 /* Reset */
251 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
252
253 /* Read Local Version */
254 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
255}
256
257static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
258{
259 struct sk_buff *skb;
260
261 BT_DBG("%s %ld", hdev->name, opt);
262
263 /* Driver initialization */
264
265 /* Special commands */
266 while ((skb = skb_dequeue(&hdev->driver_init))) {
267 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
268 skb->dev = (void *) hdev;
269
270 skb_queue_tail(&hdev->cmd_q, skb);
271 queue_work(hdev->workqueue, &hdev->cmd_work);
272 }
273 skb_queue_purge(&hdev->driver_init);
274
275 switch (hdev->dev_type) {
276 case HCI_BREDR:
277 bredr_init(hdev);
278 break;
279
280 case HCI_AMP:
281 amp_init(hdev);
282 break;
283
284 default:
285 BT_ERR("Unknown device type %d", hdev->dev_type);
286 break;
287 }
288
289}
290
6ed58ec5
VT
291static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
292{
293 BT_DBG("%s", hdev->name);
294
295 /* Read LE buffer size */
296 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
297}
298
1da177e4
LT
299static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 scan = opt;
302
303 BT_DBG("%s %x", hdev->name, scan);
304
305 /* Inquiry and Page scans */
a9de9248 306 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
307}
308
309static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 auth = opt;
312
313 BT_DBG("%s %x", hdev->name, auth);
314
315 /* Authentication */
a9de9248 316 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
317}
318
319static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __u8 encrypt = opt;
322
323 BT_DBG("%s %x", hdev->name, encrypt);
324
e4e8e37c 325 /* Encryption */
a9de9248 326 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
327}
328
e4e8e37c
MH
329static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
330{
331 __le16 policy = cpu_to_le16(opt);
332
a418b893 333 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
334
335 /* Default link policy */
336 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
337}
338
8e87d142 339/* Get HCI device by index.
1da177e4
LT
340 * Device is held on return. */
341struct hci_dev *hci_dev_get(int index)
342{
8035ded4 343 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
344
345 BT_DBG("%d", index);
346
347 if (index < 0)
348 return NULL;
349
350 read_lock(&hci_dev_list_lock);
8035ded4 351 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
352 if (d->id == index) {
353 hdev = hci_dev_hold(d);
354 break;
355 }
356 }
357 read_unlock(&hci_dev_list_lock);
358 return hdev;
359}
1da177e4
LT
360
361/* ---- Inquiry support ---- */
ff9ef578 362
30dc78e1
JH
363bool hci_discovery_active(struct hci_dev *hdev)
364{
365 struct discovery_state *discov = &hdev->discovery;
366
6fbe195d 367 switch (discov->state) {
343f935b 368 case DISCOVERY_FINDING:
6fbe195d 369 case DISCOVERY_RESOLVING:
30dc78e1
JH
370 return true;
371
6fbe195d
AG
372 default:
373 return false;
374 }
30dc78e1
JH
375}
376
ff9ef578
JH
377void hci_discovery_set_state(struct hci_dev *hdev, int state)
378{
379 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
380
381 if (hdev->discovery.state == state)
382 return;
383
384 switch (state) {
385 case DISCOVERY_STOPPED:
7b99b659
AG
386 if (hdev->discovery.state != DISCOVERY_STARTING)
387 mgmt_discovering(hdev, 0);
f963e8e9 388 hdev->discovery.type = 0;
ff9ef578
JH
389 break;
390 case DISCOVERY_STARTING:
391 break;
343f935b 392 case DISCOVERY_FINDING:
ff9ef578
JH
393 mgmt_discovering(hdev, 1);
394 break;
30dc78e1
JH
395 case DISCOVERY_RESOLVING:
396 break;
ff9ef578
JH
397 case DISCOVERY_STOPPING:
398 break;
399 }
400
401 hdev->discovery.state = state;
402}
403
1da177e4
LT
404static void inquiry_cache_flush(struct hci_dev *hdev)
405{
30883512 406 struct discovery_state *cache = &hdev->discovery;
b57c1a56 407 struct inquiry_entry *p, *n;
1da177e4 408
561aafbc
JH
409 list_for_each_entry_safe(p, n, &cache->all, all) {
410 list_del(&p->all);
b57c1a56 411 kfree(p);
1da177e4 412 }
561aafbc
JH
413
414 INIT_LIST_HEAD(&cache->unknown);
415 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
416}
417
418struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
419{
30883512 420 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
421 struct inquiry_entry *e;
422
423 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
424
561aafbc
JH
425 list_for_each_entry(e, &cache->all, all) {
426 if (!bacmp(&e->data.bdaddr, bdaddr))
427 return e;
428 }
429
430 return NULL;
431}
432
433struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 434 bdaddr_t *bdaddr)
561aafbc 435{
30883512 436 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
437 struct inquiry_entry *e;
438
439 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
440
441 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 442 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
443 return e;
444 }
445
446 return NULL;
1da177e4
LT
447}
448
30dc78e1 449struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
450 bdaddr_t *bdaddr,
451 int state)
30dc78e1
JH
452{
453 struct discovery_state *cache = &hdev->discovery;
454 struct inquiry_entry *e;
455
456 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
457
458 list_for_each_entry(e, &cache->resolve, list) {
459 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
460 return e;
461 if (!bacmp(&e->data.bdaddr, bdaddr))
462 return e;
463 }
464
465 return NULL;
466}
467
a3d4e20a 468void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 469 struct inquiry_entry *ie)
a3d4e20a
JH
470{
471 struct discovery_state *cache = &hdev->discovery;
472 struct list_head *pos = &cache->resolve;
473 struct inquiry_entry *p;
474
475 list_del(&ie->list);
476
477 list_for_each_entry(p, &cache->resolve, list) {
478 if (p->name_state != NAME_PENDING &&
479 abs(p->data.rssi) >= abs(ie->data.rssi))
480 break;
481 pos = &p->list;
482 }
483
484 list_add(&ie->list, pos);
485}
486
3175405b 487bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 488 bool name_known, bool *ssp)
1da177e4 489{
30883512 490 struct discovery_state *cache = &hdev->discovery;
70f23020 491 struct inquiry_entry *ie;
1da177e4
LT
492
493 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
494
388fc8fa
JH
495 if (ssp)
496 *ssp = data->ssp_mode;
497
70f23020 498 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 499 if (ie) {
388fc8fa
JH
500 if (ie->data.ssp_mode && ssp)
501 *ssp = true;
502
a3d4e20a
JH
503 if (ie->name_state == NAME_NEEDED &&
504 data->rssi != ie->data.rssi) {
505 ie->data.rssi = data->rssi;
506 hci_inquiry_cache_update_resolve(hdev, ie);
507 }
508
561aafbc 509 goto update;
a3d4e20a 510 }
561aafbc
JH
511
512 /* Entry not in the cache. Add new one. */
513 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
514 if (!ie)
3175405b 515 return false;
561aafbc
JH
516
517 list_add(&ie->all, &cache->all);
518
519 if (name_known) {
520 ie->name_state = NAME_KNOWN;
521 } else {
522 ie->name_state = NAME_NOT_KNOWN;
523 list_add(&ie->list, &cache->unknown);
524 }
70f23020 525
561aafbc
JH
526update:
527 if (name_known && ie->name_state != NAME_KNOWN &&
528 ie->name_state != NAME_PENDING) {
529 ie->name_state = NAME_KNOWN;
530 list_del(&ie->list);
1da177e4
LT
531 }
532
70f23020
AE
533 memcpy(&ie->data, data, sizeof(*data));
534 ie->timestamp = jiffies;
1da177e4 535 cache->timestamp = jiffies;
3175405b
JH
536
537 if (ie->name_state == NAME_NOT_KNOWN)
538 return false;
539
540 return true;
1da177e4
LT
541}
542
543static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
544{
30883512 545 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
546 struct inquiry_info *info = (struct inquiry_info *) buf;
547 struct inquiry_entry *e;
548 int copied = 0;
549
561aafbc 550 list_for_each_entry(e, &cache->all, all) {
1da177e4 551 struct inquiry_data *data = &e->data;
b57c1a56
JH
552
553 if (copied >= num)
554 break;
555
1da177e4
LT
556 bacpy(&info->bdaddr, &data->bdaddr);
557 info->pscan_rep_mode = data->pscan_rep_mode;
558 info->pscan_period_mode = data->pscan_period_mode;
559 info->pscan_mode = data->pscan_mode;
560 memcpy(info->dev_class, data->dev_class, 3);
561 info->clock_offset = data->clock_offset;
b57c1a56 562
1da177e4 563 info++;
b57c1a56 564 copied++;
1da177e4
LT
565 }
566
567 BT_DBG("cache %p, copied %d", cache, copied);
568 return copied;
569}
570
571static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
572{
573 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
574 struct hci_cp_inquiry cp;
575
576 BT_DBG("%s", hdev->name);
577
578 if (test_bit(HCI_INQUIRY, &hdev->flags))
579 return;
580
581 /* Start Inquiry */
582 memcpy(&cp.lap, &ir->lap, 3);
583 cp.length = ir->length;
584 cp.num_rsp = ir->num_rsp;
a9de9248 585 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
586}
587
588int hci_inquiry(void __user *arg)
589{
590 __u8 __user *ptr = arg;
591 struct hci_inquiry_req ir;
592 struct hci_dev *hdev;
593 int err = 0, do_inquiry = 0, max_rsp;
594 long timeo;
595 __u8 *buf;
596
597 if (copy_from_user(&ir, ptr, sizeof(ir)))
598 return -EFAULT;
599
5a08ecce
AE
600 hdev = hci_dev_get(ir.dev_id);
601 if (!hdev)
1da177e4
LT
602 return -ENODEV;
603
09fd0de5 604 hci_dev_lock(hdev);
8e87d142 605 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
70f23020
AE
606 inquiry_cache_empty(hdev) ||
607 ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
608 inquiry_cache_flush(hdev);
609 do_inquiry = 1;
610 }
09fd0de5 611 hci_dev_unlock(hdev);
1da177e4 612
04837f64 613 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
614
615 if (do_inquiry) {
616 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
617 if (err < 0)
618 goto done;
619 }
1da177e4
LT
620
621 /* for unlimited number of responses we will use buffer with 255 entries */
622 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
623
624 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
625 * copy it to the user space.
626 */
01df8c31 627 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 628 if (!buf) {
1da177e4
LT
629 err = -ENOMEM;
630 goto done;
631 }
632
09fd0de5 633 hci_dev_lock(hdev);
1da177e4 634 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 635 hci_dev_unlock(hdev);
1da177e4
LT
636
637 BT_DBG("num_rsp %d", ir.num_rsp);
638
639 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
640 ptr += sizeof(ir);
641 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
642 ir.num_rsp))
643 err = -EFAULT;
8e87d142 644 } else
1da177e4
LT
645 err = -EFAULT;
646
647 kfree(buf);
648
649done:
650 hci_dev_put(hdev);
651 return err;
652}
653
654/* ---- HCI ioctl helpers ---- */
655
656int hci_dev_open(__u16 dev)
657{
658 struct hci_dev *hdev;
659 int ret = 0;
660
5a08ecce
AE
661 hdev = hci_dev_get(dev);
662 if (!hdev)
1da177e4
LT
663 return -ENODEV;
664
665 BT_DBG("%s %p", hdev->name, hdev);
666
667 hci_req_lock(hdev);
668
94324962
JH
669 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
670 ret = -ENODEV;
671 goto done;
672 }
673
611b30f7
MH
674 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
675 ret = -ERFKILL;
676 goto done;
677 }
678
1da177e4
LT
679 if (test_bit(HCI_UP, &hdev->flags)) {
680 ret = -EALREADY;
681 goto done;
682 }
683
684 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
685 set_bit(HCI_RAW, &hdev->flags);
686
07e3b94a
AE
687 /* Treat all non BR/EDR controllers as raw devices if
688 enable_hs is not set */
689 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
690 set_bit(HCI_RAW, &hdev->flags);
691
1da177e4
LT
692 if (hdev->open(hdev)) {
693 ret = -EIO;
694 goto done;
695 }
696
697 if (!test_bit(HCI_RAW, &hdev->flags)) {
698 atomic_set(&hdev->cmd_cnt, 1);
699 set_bit(HCI_INIT, &hdev->flags);
a5040efa 700 hdev->init_last_cmd = 0;
1da177e4 701
04837f64
MH
702 ret = __hci_request(hdev, hci_init_req, 0,
703 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 704
eead27da 705 if (lmp_host_le_capable(hdev))
6ed58ec5
VT
706 ret = __hci_request(hdev, hci_le_init_req, 0,
707 msecs_to_jiffies(HCI_INIT_TIMEOUT));
708
1da177e4
LT
709 clear_bit(HCI_INIT, &hdev->flags);
710 }
711
712 if (!ret) {
713 hci_dev_hold(hdev);
714 set_bit(HCI_UP, &hdev->flags);
715 hci_notify(hdev, HCI_DEV_UP);
a8b2d5c2 716 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 717 hci_dev_lock(hdev);
744cf19e 718 mgmt_powered(hdev, 1);
09fd0de5 719 hci_dev_unlock(hdev);
56e5cb86 720 }
8e87d142 721 } else {
1da177e4 722 /* Init failed, cleanup */
3eff45ea 723 flush_work(&hdev->tx_work);
c347b765 724 flush_work(&hdev->cmd_work);
b78752cc 725 flush_work(&hdev->rx_work);
1da177e4
LT
726
727 skb_queue_purge(&hdev->cmd_q);
728 skb_queue_purge(&hdev->rx_q);
729
730 if (hdev->flush)
731 hdev->flush(hdev);
732
733 if (hdev->sent_cmd) {
734 kfree_skb(hdev->sent_cmd);
735 hdev->sent_cmd = NULL;
736 }
737
738 hdev->close(hdev);
739 hdev->flags = 0;
740 }
741
742done:
743 hci_req_unlock(hdev);
744 hci_dev_put(hdev);
745 return ret;
746}
747
748static int hci_dev_do_close(struct hci_dev *hdev)
749{
750 BT_DBG("%s %p", hdev->name, hdev);
751
28b75a89
AG
752 cancel_work_sync(&hdev->le_scan);
753
1da177e4
LT
754 hci_req_cancel(hdev, ENODEV);
755 hci_req_lock(hdev);
756
757 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 758 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
759 hci_req_unlock(hdev);
760 return 0;
761 }
762
3eff45ea
GP
763 /* Flush RX and TX works */
764 flush_work(&hdev->tx_work);
b78752cc 765 flush_work(&hdev->rx_work);
1da177e4 766
16ab91ab 767 if (hdev->discov_timeout > 0) {
e0f9309f 768 cancel_delayed_work(&hdev->discov_off);
16ab91ab 769 hdev->discov_timeout = 0;
5e5282bb 770 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
771 }
772
a8b2d5c2 773 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
774 cancel_delayed_work(&hdev->service_cache);
775
7ba8b4be
AG
776 cancel_delayed_work_sync(&hdev->le_scan_disable);
777
09fd0de5 778 hci_dev_lock(hdev);
1da177e4
LT
779 inquiry_cache_flush(hdev);
780 hci_conn_hash_flush(hdev);
09fd0de5 781 hci_dev_unlock(hdev);
1da177e4
LT
782
783 hci_notify(hdev, HCI_DEV_DOWN);
784
785 if (hdev->flush)
786 hdev->flush(hdev);
787
788 /* Reset device */
789 skb_queue_purge(&hdev->cmd_q);
790 atomic_set(&hdev->cmd_cnt, 1);
8af59467
JH
791 if (!test_bit(HCI_RAW, &hdev->flags) &&
792 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
1da177e4 793 set_bit(HCI_INIT, &hdev->flags);
04837f64 794 __hci_request(hdev, hci_reset_req, 0,
cad44c2b 795 msecs_to_jiffies(250));
1da177e4
LT
796 clear_bit(HCI_INIT, &hdev->flags);
797 }
798
c347b765
GP
799 /* flush cmd work */
800 flush_work(&hdev->cmd_work);
1da177e4
LT
801
802 /* Drop queues */
803 skb_queue_purge(&hdev->rx_q);
804 skb_queue_purge(&hdev->cmd_q);
805 skb_queue_purge(&hdev->raw_q);
806
807 /* Drop last sent command */
808 if (hdev->sent_cmd) {
b79f44c1 809 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
810 kfree_skb(hdev->sent_cmd);
811 hdev->sent_cmd = NULL;
812 }
813
814 /* After this point our queues are empty
815 * and no tasks are scheduled. */
816 hdev->close(hdev);
817
8ee56540
MH
818 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
819 hci_dev_lock(hdev);
820 mgmt_powered(hdev, 0);
821 hci_dev_unlock(hdev);
822 }
5add6af8 823
1da177e4
LT
824 /* Clear flags */
825 hdev->flags = 0;
826
e59fda8d 827 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 828 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
e59fda8d 829
1da177e4
LT
830 hci_req_unlock(hdev);
831
832 hci_dev_put(hdev);
833 return 0;
834}
835
836int hci_dev_close(__u16 dev)
837{
838 struct hci_dev *hdev;
839 int err;
840
70f23020
AE
841 hdev = hci_dev_get(dev);
842 if (!hdev)
1da177e4 843 return -ENODEV;
8ee56540
MH
844
845 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
846 cancel_delayed_work(&hdev->power_off);
847
1da177e4 848 err = hci_dev_do_close(hdev);
8ee56540 849
1da177e4
LT
850 hci_dev_put(hdev);
851 return err;
852}
853
854int hci_dev_reset(__u16 dev)
855{
856 struct hci_dev *hdev;
857 int ret = 0;
858
70f23020
AE
859 hdev = hci_dev_get(dev);
860 if (!hdev)
1da177e4
LT
861 return -ENODEV;
862
863 hci_req_lock(hdev);
1da177e4
LT
864
865 if (!test_bit(HCI_UP, &hdev->flags))
866 goto done;
867
868 /* Drop queues */
869 skb_queue_purge(&hdev->rx_q);
870 skb_queue_purge(&hdev->cmd_q);
871
09fd0de5 872 hci_dev_lock(hdev);
1da177e4
LT
873 inquiry_cache_flush(hdev);
874 hci_conn_hash_flush(hdev);
09fd0de5 875 hci_dev_unlock(hdev);
1da177e4
LT
876
877 if (hdev->flush)
878 hdev->flush(hdev);
879
8e87d142 880 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 881 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
882
883 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64
MH
884 ret = __hci_request(hdev, hci_reset_req, 0,
885 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
886
887done:
1da177e4
LT
888 hci_req_unlock(hdev);
889 hci_dev_put(hdev);
890 return ret;
891}
892
893int hci_dev_reset_stat(__u16 dev)
894{
895 struct hci_dev *hdev;
896 int ret = 0;
897
70f23020
AE
898 hdev = hci_dev_get(dev);
899 if (!hdev)
1da177e4
LT
900 return -ENODEV;
901
902 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
903
904 hci_dev_put(hdev);
905
906 return ret;
907}
908
909int hci_dev_cmd(unsigned int cmd, void __user *arg)
910{
911 struct hci_dev *hdev;
912 struct hci_dev_req dr;
913 int err = 0;
914
915 if (copy_from_user(&dr, arg, sizeof(dr)))
916 return -EFAULT;
917
70f23020
AE
918 hdev = hci_dev_get(dr.dev_id);
919 if (!hdev)
1da177e4
LT
920 return -ENODEV;
921
922 switch (cmd) {
923 case HCISETAUTH:
04837f64
MH
924 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
925 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
926 break;
927
928 case HCISETENCRYPT:
929 if (!lmp_encrypt_capable(hdev)) {
930 err = -EOPNOTSUPP;
931 break;
932 }
933
934 if (!test_bit(HCI_AUTH, &hdev->flags)) {
935 /* Auth must be enabled first */
04837f64
MH
936 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
937 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
938 if (err)
939 break;
940 }
941
04837f64
MH
942 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
943 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
944 break;
945
946 case HCISETSCAN:
04837f64
MH
947 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
948 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
949 break;
950
1da177e4 951 case HCISETLINKPOL:
e4e8e37c
MH
952 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
953 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
954 break;
955
956 case HCISETLINKMODE:
e4e8e37c
MH
957 hdev->link_mode = ((__u16) dr.dev_opt) &
958 (HCI_LM_MASTER | HCI_LM_ACCEPT);
959 break;
960
961 case HCISETPTYPE:
962 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
963 break;
964
965 case HCISETACLMTU:
e4e8e37c
MH
966 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
967 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
968 break;
969
970 case HCISETSCOMTU:
e4e8e37c
MH
971 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
972 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
973 break;
974
975 default:
976 err = -EINVAL;
977 break;
978 }
e4e8e37c 979
1da177e4
LT
980 hci_dev_put(hdev);
981 return err;
982}
983
984int hci_get_dev_list(void __user *arg)
985{
8035ded4 986 struct hci_dev *hdev;
1da177e4
LT
987 struct hci_dev_list_req *dl;
988 struct hci_dev_req *dr;
1da177e4
LT
989 int n = 0, size, err;
990 __u16 dev_num;
991
992 if (get_user(dev_num, (__u16 __user *) arg))
993 return -EFAULT;
994
995 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
996 return -EINVAL;
997
998 size = sizeof(*dl) + dev_num * sizeof(*dr);
999
70f23020
AE
1000 dl = kzalloc(size, GFP_KERNEL);
1001 if (!dl)
1da177e4
LT
1002 return -ENOMEM;
1003
1004 dr = dl->dev_req;
1005
f20d09d5 1006 read_lock(&hci_dev_list_lock);
8035ded4 1007 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 1008 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 1009 cancel_delayed_work(&hdev->power_off);
c542a06c 1010
a8b2d5c2
JH
1011 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1012 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1013
1da177e4
LT
1014 (dr + n)->dev_id = hdev->id;
1015 (dr + n)->dev_opt = hdev->flags;
c542a06c 1016
1da177e4
LT
1017 if (++n >= dev_num)
1018 break;
1019 }
f20d09d5 1020 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1021
1022 dl->dev_num = n;
1023 size = sizeof(*dl) + n * sizeof(*dr);
1024
1025 err = copy_to_user(arg, dl, size);
1026 kfree(dl);
1027
1028 return err ? -EFAULT : 0;
1029}
1030
1031int hci_get_dev_info(void __user *arg)
1032{
1033 struct hci_dev *hdev;
1034 struct hci_dev_info di;
1035 int err = 0;
1036
1037 if (copy_from_user(&di, arg, sizeof(di)))
1038 return -EFAULT;
1039
70f23020
AE
1040 hdev = hci_dev_get(di.dev_id);
1041 if (!hdev)
1da177e4
LT
1042 return -ENODEV;
1043
a8b2d5c2 1044 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1045 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1046
a8b2d5c2
JH
1047 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1048 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1049
1da177e4
LT
1050 strcpy(di.name, hdev->name);
1051 di.bdaddr = hdev->bdaddr;
943da25d 1052 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1053 di.flags = hdev->flags;
1054 di.pkt_type = hdev->pkt_type;
1055 di.acl_mtu = hdev->acl_mtu;
1056 di.acl_pkts = hdev->acl_pkts;
1057 di.sco_mtu = hdev->sco_mtu;
1058 di.sco_pkts = hdev->sco_pkts;
1059 di.link_policy = hdev->link_policy;
1060 di.link_mode = hdev->link_mode;
1061
1062 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1063 memcpy(&di.features, &hdev->features, sizeof(di.features));
1064
1065 if (copy_to_user(arg, &di, sizeof(di)))
1066 err = -EFAULT;
1067
1068 hci_dev_put(hdev);
1069
1070 return err;
1071}
1072
1073/* ---- Interface to HCI drivers ---- */
1074
611b30f7
MH
1075static int hci_rfkill_set_block(void *data, bool blocked)
1076{
1077 struct hci_dev *hdev = data;
1078
1079 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1080
1081 if (!blocked)
1082 return 0;
1083
1084 hci_dev_do_close(hdev);
1085
1086 return 0;
1087}
1088
1089static const struct rfkill_ops hci_rfkill_ops = {
1090 .set_block = hci_rfkill_set_block,
1091};
1092
1da177e4
LT
1093/* Alloc HCI device */
1094struct hci_dev *hci_alloc_dev(void)
1095{
1096 struct hci_dev *hdev;
1097
25ea6db0 1098 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
1099 if (!hdev)
1100 return NULL;
1101
0ac7e700 1102 hci_init_sysfs(hdev);
1da177e4
LT
1103 skb_queue_head_init(&hdev->driver_init);
1104
1105 return hdev;
1106}
1107EXPORT_SYMBOL(hci_alloc_dev);
1108
1109/* Free HCI device */
1110void hci_free_dev(struct hci_dev *hdev)
1111{
1112 skb_queue_purge(&hdev->driver_init);
1113
a91f2e39
MH
1114 /* will free via device release */
1115 put_device(&hdev->dev);
1da177e4
LT
1116}
1117EXPORT_SYMBOL(hci_free_dev);
1118
ab81cbf9
JH
1119static void hci_power_on(struct work_struct *work)
1120{
1121 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1122
1123 BT_DBG("%s", hdev->name);
1124
1125 if (hci_dev_open(hdev->id) < 0)
1126 return;
1127
a8b2d5c2 1128 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
80b7ab33 1129 schedule_delayed_work(&hdev->power_off,
3243553f 1130 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
ab81cbf9 1131
a8b2d5c2 1132 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1133 mgmt_index_added(hdev);
ab81cbf9
JH
1134}
1135
1136static void hci_power_off(struct work_struct *work)
1137{
3243553f
JH
1138 struct hci_dev *hdev = container_of(work, struct hci_dev,
1139 power_off.work);
ab81cbf9
JH
1140
1141 BT_DBG("%s", hdev->name);
1142
8ee56540 1143 hci_dev_do_close(hdev);
ab81cbf9
JH
1144}
1145
16ab91ab
JH
1146static void hci_discov_off(struct work_struct *work)
1147{
1148 struct hci_dev *hdev;
1149 u8 scan = SCAN_PAGE;
1150
1151 hdev = container_of(work, struct hci_dev, discov_off.work);
1152
1153 BT_DBG("%s", hdev->name);
1154
09fd0de5 1155 hci_dev_lock(hdev);
16ab91ab
JH
1156
1157 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1158
1159 hdev->discov_timeout = 0;
1160
09fd0de5 1161 hci_dev_unlock(hdev);
16ab91ab
JH
1162}
1163
2aeb9a1a
JH
1164int hci_uuids_clear(struct hci_dev *hdev)
1165{
1166 struct list_head *p, *n;
1167
1168 list_for_each_safe(p, n, &hdev->uuids) {
1169 struct bt_uuid *uuid;
1170
1171 uuid = list_entry(p, struct bt_uuid, list);
1172
1173 list_del(p);
1174 kfree(uuid);
1175 }
1176
1177 return 0;
1178}
1179
55ed8ca1
JH
1180int hci_link_keys_clear(struct hci_dev *hdev)
1181{
1182 struct list_head *p, *n;
1183
1184 list_for_each_safe(p, n, &hdev->link_keys) {
1185 struct link_key *key;
1186
1187 key = list_entry(p, struct link_key, list);
1188
1189 list_del(p);
1190 kfree(key);
1191 }
1192
1193 return 0;
1194}
1195
b899efaf
VCG
1196int hci_smp_ltks_clear(struct hci_dev *hdev)
1197{
1198 struct smp_ltk *k, *tmp;
1199
1200 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1201 list_del(&k->list);
1202 kfree(k);
1203 }
1204
1205 return 0;
1206}
1207
55ed8ca1
JH
1208struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1209{
8035ded4 1210 struct link_key *k;
55ed8ca1 1211
8035ded4 1212 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1213 if (bacmp(bdaddr, &k->bdaddr) == 0)
1214 return k;
55ed8ca1
JH
1215
1216 return NULL;
1217}
1218
d25e28ab
JH
1219static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1220 u8 key_type, u8 old_key_type)
1221{
1222 /* Legacy key */
1223 if (key_type < 0x03)
1224 return 1;
1225
1226 /* Debug keys are insecure so don't store them persistently */
1227 if (key_type == HCI_LK_DEBUG_COMBINATION)
1228 return 0;
1229
1230 /* Changed combination key and there's no previous one */
1231 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1232 return 0;
1233
1234 /* Security mode 3 case */
1235 if (!conn)
1236 return 1;
1237
1238 /* Neither local nor remote side had no-bonding as requirement */
1239 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1240 return 1;
1241
1242 /* Local side had dedicated bonding as requirement */
1243 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1244 return 1;
1245
1246 /* Remote side had dedicated bonding as requirement */
1247 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1248 return 1;
1249
1250 /* If none of the above criteria match, then don't store the key
1251 * persistently */
1252 return 0;
1253}
1254
c9839a11 1255struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1256{
c9839a11 1257 struct smp_ltk *k;
75d262c2 1258
c9839a11
VCG
1259 list_for_each_entry(k, &hdev->long_term_keys, list) {
1260 if (k->ediv != ediv ||
1261 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1262 continue;
1263
c9839a11 1264 return k;
75d262c2
VCG
1265 }
1266
1267 return NULL;
1268}
1269EXPORT_SYMBOL(hci_find_ltk);
1270
c9839a11 1271struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1272 u8 addr_type)
75d262c2 1273{
c9839a11 1274 struct smp_ltk *k;
75d262c2 1275
c9839a11
VCG
1276 list_for_each_entry(k, &hdev->long_term_keys, list)
1277 if (addr_type == k->bdaddr_type &&
1278 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1279 return k;
1280
1281 return NULL;
1282}
c9839a11 1283EXPORT_SYMBOL(hci_find_ltk_by_addr);
75d262c2 1284
d25e28ab 1285int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1286 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1287{
1288 struct link_key *key, *old_key;
4df378a1 1289 u8 old_key_type, persistent;
55ed8ca1
JH
1290
1291 old_key = hci_find_link_key(hdev, bdaddr);
1292 if (old_key) {
1293 old_key_type = old_key->type;
1294 key = old_key;
1295 } else {
12adcf3a 1296 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1297 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1298 if (!key)
1299 return -ENOMEM;
1300 list_add(&key->list, &hdev->link_keys);
1301 }
1302
1303 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1304
d25e28ab
JH
1305 /* Some buggy controller combinations generate a changed
1306 * combination key for legacy pairing even when there's no
1307 * previous key */
1308 if (type == HCI_LK_CHANGED_COMBINATION &&
1309 (!conn || conn->remote_auth == 0xff) &&
655fe6ec 1310 old_key_type == 0xff) {
d25e28ab 1311 type = HCI_LK_COMBINATION;
655fe6ec
JH
1312 if (conn)
1313 conn->key_type = type;
1314 }
d25e28ab 1315
55ed8ca1
JH
1316 bacpy(&key->bdaddr, bdaddr);
1317 memcpy(key->val, val, 16);
55ed8ca1
JH
1318 key->pin_len = pin_len;
1319
b6020ba0 1320 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1321 key->type = old_key_type;
4748fed2
JH
1322 else
1323 key->type = type;
1324
4df378a1
JH
1325 if (!new_key)
1326 return 0;
1327
1328 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1329
744cf19e 1330 mgmt_new_link_key(hdev, key, persistent);
4df378a1
JH
1331
1332 if (!persistent) {
1333 list_del(&key->list);
1334 kfree(key);
1335 }
55ed8ca1
JH
1336
1337 return 0;
1338}
1339
c9839a11 1340int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
04124681
GP
1341 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
1342 ediv, u8 rand[8])
75d262c2 1343{
c9839a11 1344 struct smp_ltk *key, *old_key;
75d262c2 1345
c9839a11
VCG
1346 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1347 return 0;
75d262c2 1348
c9839a11
VCG
1349 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1350 if (old_key)
75d262c2 1351 key = old_key;
c9839a11
VCG
1352 else {
1353 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1354 if (!key)
1355 return -ENOMEM;
c9839a11 1356 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1357 }
1358
75d262c2 1359 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1360 key->bdaddr_type = addr_type;
1361 memcpy(key->val, tk, sizeof(key->val));
1362 key->authenticated = authenticated;
1363 key->ediv = ediv;
1364 key->enc_size = enc_size;
1365 key->type = type;
1366 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1367
c9839a11
VCG
1368 if (!new_key)
1369 return 0;
75d262c2 1370
261cc5aa
VCG
1371 if (type & HCI_SMP_LTK)
1372 mgmt_new_ltk(hdev, key, 1);
1373
75d262c2
VCG
1374 return 0;
1375}
1376
55ed8ca1
JH
1377int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1378{
1379 struct link_key *key;
1380
1381 key = hci_find_link_key(hdev, bdaddr);
1382 if (!key)
1383 return -ENOENT;
1384
1385 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1386
1387 list_del(&key->list);
1388 kfree(key);
1389
1390 return 0;
1391}
1392
b899efaf
VCG
1393int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1394{
1395 struct smp_ltk *k, *tmp;
1396
1397 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1398 if (bacmp(bdaddr, &k->bdaddr))
1399 continue;
1400
1401 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1402
1403 list_del(&k->list);
1404 kfree(k);
1405 }
1406
1407 return 0;
1408}
1409
6bd32326
VT
1410/* HCI command timer function */
1411static void hci_cmd_timer(unsigned long arg)
1412{
1413 struct hci_dev *hdev = (void *) arg;
1414
1415 BT_ERR("%s command tx timeout", hdev->name);
1416 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1417 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1418}
1419
2763eda6 1420struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1421 bdaddr_t *bdaddr)
2763eda6
SJ
1422{
1423 struct oob_data *data;
1424
1425 list_for_each_entry(data, &hdev->remote_oob_data, list)
1426 if (bacmp(bdaddr, &data->bdaddr) == 0)
1427 return data;
1428
1429 return NULL;
1430}
1431
1432int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1433{
1434 struct oob_data *data;
1435
1436 data = hci_find_remote_oob_data(hdev, bdaddr);
1437 if (!data)
1438 return -ENOENT;
1439
1440 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1441
1442 list_del(&data->list);
1443 kfree(data);
1444
1445 return 0;
1446}
1447
1448int hci_remote_oob_data_clear(struct hci_dev *hdev)
1449{
1450 struct oob_data *data, *n;
1451
1452 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1453 list_del(&data->list);
1454 kfree(data);
1455 }
1456
1457 return 0;
1458}
1459
1460int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1461 u8 *randomizer)
2763eda6
SJ
1462{
1463 struct oob_data *data;
1464
1465 data = hci_find_remote_oob_data(hdev, bdaddr);
1466
1467 if (!data) {
1468 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1469 if (!data)
1470 return -ENOMEM;
1471
1472 bacpy(&data->bdaddr, bdaddr);
1473 list_add(&data->list, &hdev->remote_oob_data);
1474 }
1475
1476 memcpy(data->hash, hash, sizeof(data->hash));
1477 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1478
1479 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1480
1481 return 0;
1482}
1483
04124681 1484struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1485{
8035ded4 1486 struct bdaddr_list *b;
b2a66aad 1487
8035ded4 1488 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1489 if (bacmp(bdaddr, &b->bdaddr) == 0)
1490 return b;
b2a66aad
AJ
1491
1492 return NULL;
1493}
1494
1495int hci_blacklist_clear(struct hci_dev *hdev)
1496{
1497 struct list_head *p, *n;
1498
1499 list_for_each_safe(p, n, &hdev->blacklist) {
1500 struct bdaddr_list *b;
1501
1502 b = list_entry(p, struct bdaddr_list, list);
1503
1504 list_del(p);
1505 kfree(b);
1506 }
1507
1508 return 0;
1509}
1510
88c1fe4b 1511int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1512{
1513 struct bdaddr_list *entry;
b2a66aad
AJ
1514
1515 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1516 return -EBADF;
1517
5e762444
AJ
1518 if (hci_blacklist_lookup(hdev, bdaddr))
1519 return -EEXIST;
b2a66aad
AJ
1520
1521 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1522 if (!entry)
1523 return -ENOMEM;
b2a66aad
AJ
1524
1525 bacpy(&entry->bdaddr, bdaddr);
1526
1527 list_add(&entry->list, &hdev->blacklist);
1528
88c1fe4b 1529 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1530}
1531
88c1fe4b 1532int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1533{
1534 struct bdaddr_list *entry;
b2a66aad 1535
1ec918ce 1536 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1537 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1538
1539 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1540 if (!entry)
5e762444 1541 return -ENOENT;
b2a66aad
AJ
1542
1543 list_del(&entry->list);
1544 kfree(entry);
1545
88c1fe4b 1546 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1547}
1548
db323f2f 1549static void hci_clear_adv_cache(struct work_struct *work)
35815085 1550{
db323f2f 1551 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 1552 adv_work.work);
35815085
AG
1553
1554 hci_dev_lock(hdev);
1555
1556 hci_adv_entries_clear(hdev);
1557
1558 hci_dev_unlock(hdev);
1559}
1560
76c8686f
AG
1561int hci_adv_entries_clear(struct hci_dev *hdev)
1562{
1563 struct adv_entry *entry, *tmp;
1564
1565 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1566 list_del(&entry->list);
1567 kfree(entry);
1568 }
1569
1570 BT_DBG("%s adv cache cleared", hdev->name);
1571
1572 return 0;
1573}
1574
1575struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1576{
1577 struct adv_entry *entry;
1578
1579 list_for_each_entry(entry, &hdev->adv_entries, list)
1580 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1581 return entry;
1582
1583 return NULL;
1584}
1585
1586static inline int is_connectable_adv(u8 evt_type)
1587{
1588 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1589 return 1;
1590
1591 return 0;
1592}
1593
1594int hci_add_adv_entry(struct hci_dev *hdev,
04124681 1595 struct hci_ev_le_advertising_info *ev) { struct adv_entry *entry; if (!is_connectable_adv(ev->evt_type))
76c8686f
AG
1596 return -EINVAL;
1597
1598 /* Only new entries should be added to adv_entries. So, if
1599 * bdaddr was found, don't add it. */
1600 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1601 return 0;
1602
4777bfde 1603 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
76c8686f
AG
1604 if (!entry)
1605 return -ENOMEM;
1606
1607 bacpy(&entry->bdaddr, &ev->bdaddr);
1608 entry->bdaddr_type = ev->bdaddr_type;
1609
1610 list_add(&entry->list, &hdev->adv_entries);
1611
1612 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1613 batostr(&entry->bdaddr), entry->bdaddr_type);
1614
1615 return 0;
1616}
1617
7ba8b4be
AG
1618static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1619{
1620 struct le_scan_params *param = (struct le_scan_params *) opt;
1621 struct hci_cp_le_set_scan_param cp;
1622
1623 memset(&cp, 0, sizeof(cp));
1624 cp.type = param->type;
1625 cp.interval = cpu_to_le16(param->interval);
1626 cp.window = cpu_to_le16(param->window);
1627
1628 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1629}
1630
1631static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1632{
1633 struct hci_cp_le_set_scan_enable cp;
1634
1635 memset(&cp, 0, sizeof(cp));
1636 cp.enable = 1;
1637
1638 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1639}
1640
1641static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
04124681 1642 u16 window, int timeout)
7ba8b4be
AG
1643{
1644 long timeo = msecs_to_jiffies(3000);
1645 struct le_scan_params param;
1646 int err;
1647
1648 BT_DBG("%s", hdev->name);
1649
1650 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1651 return -EINPROGRESS;
1652
1653 param.type = type;
1654 param.interval = interval;
1655 param.window = window;
1656
1657 hci_req_lock(hdev);
1658
1659 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
04124681 1660 timeo);
7ba8b4be
AG
1661 if (!err)
1662 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1663
1664 hci_req_unlock(hdev);
1665
1666 if (err < 0)
1667 return err;
1668
1669 schedule_delayed_work(&hdev->le_scan_disable,
04124681 1670 msecs_to_jiffies(timeout));
7ba8b4be
AG
1671
1672 return 0;
1673}
1674
1675static void le_scan_disable_work(struct work_struct *work)
1676{
1677 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 1678 le_scan_disable.work);
7ba8b4be
AG
1679 struct hci_cp_le_set_scan_enable cp;
1680
1681 BT_DBG("%s", hdev->name);
1682
1683 memset(&cp, 0, sizeof(cp));
1684
1685 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1686}
1687
28b75a89
AG
1688static void le_scan_work(struct work_struct *work)
1689{
1690 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1691 struct le_scan_params *param = &hdev->le_scan_params;
1692
1693 BT_DBG("%s", hdev->name);
1694
04124681
GP
1695 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1696 param->timeout);
28b75a89
AG
1697}
1698
1699int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
04124681 1700 int timeout)
28b75a89
AG
1701{
1702 struct le_scan_params *param = &hdev->le_scan_params;
1703
1704 BT_DBG("%s", hdev->name);
1705
1706 if (work_busy(&hdev->le_scan))
1707 return -EINPROGRESS;
1708
1709 param->type = type;
1710 param->interval = interval;
1711 param->window = window;
1712 param->timeout = timeout;
1713
1714 queue_work(system_long_wq, &hdev->le_scan);
1715
1716 return 0;
1717}
1718
1da177e4
LT
1719/* Register HCI device */
1720int hci_register_dev(struct hci_dev *hdev)
1721{
1722 struct list_head *head = &hci_dev_list, *p;
08add513 1723 int i, id, error;
1da177e4 1724
e9b9cfa1 1725 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1726
010666a1 1727 if (!hdev->open || !hdev->close)
1da177e4
LT
1728 return -EINVAL;
1729
08add513
MM
1730 /* Do not allow HCI_AMP devices to register at index 0,
1731 * so the index can be used as the AMP controller ID.
1732 */
1733 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1734
f20d09d5 1735 write_lock(&hci_dev_list_lock);
1da177e4
LT
1736
1737 /* Find first available device id */
1738 list_for_each(p, &hci_dev_list) {
1739 if (list_entry(p, struct hci_dev, list)->id != id)
1740 break;
1741 head = p; id++;
1742 }
8e87d142 1743
1da177e4
LT
1744 sprintf(hdev->name, "hci%d", id);
1745 hdev->id = id;
c6feeb28 1746 list_add_tail(&hdev->list, head);
1da177e4 1747
09fd0de5 1748 mutex_init(&hdev->lock);
1da177e4
LT
1749
1750 hdev->flags = 0;
d23264a8 1751 hdev->dev_flags = 0;
1da177e4 1752 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
5b7f9909 1753 hdev->esco_type = (ESCO_HV1);
1da177e4 1754 hdev->link_mode = (HCI_LM_ACCEPT);
17fa4b9d 1755 hdev->io_capability = 0x03; /* No Input No Output */
1da177e4 1756
04837f64
MH
1757 hdev->idle_timeout = 0;
1758 hdev->sniff_max_interval = 800;
1759 hdev->sniff_min_interval = 80;
1760
b78752cc 1761 INIT_WORK(&hdev->rx_work, hci_rx_work);
c347b765 1762 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3eff45ea 1763 INIT_WORK(&hdev->tx_work, hci_tx_work);
b78752cc 1764
1da177e4
LT
1765
1766 skb_queue_head_init(&hdev->rx_q);
1767 skb_queue_head_init(&hdev->cmd_q);
1768 skb_queue_head_init(&hdev->raw_q);
1769
6bd32326
VT
1770 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1771
cd4c5391 1772 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1773 hdev->reassembly[i] = NULL;
1774
1da177e4 1775 init_waitqueue_head(&hdev->req_wait_q);
a6a67efd 1776 mutex_init(&hdev->req_lock);
1da177e4 1777
30883512 1778 discovery_init(hdev);
1da177e4
LT
1779
1780 hci_conn_hash_init(hdev);
1781
2e58ef3e
JH
1782 INIT_LIST_HEAD(&hdev->mgmt_pending);
1783
ea4bd8ba 1784 INIT_LIST_HEAD(&hdev->blacklist);
f0358568 1785
2aeb9a1a
JH
1786 INIT_LIST_HEAD(&hdev->uuids);
1787
55ed8ca1 1788 INIT_LIST_HEAD(&hdev->link_keys);
b899efaf 1789 INIT_LIST_HEAD(&hdev->long_term_keys);
55ed8ca1 1790
2763eda6
SJ
1791 INIT_LIST_HEAD(&hdev->remote_oob_data);
1792
76c8686f
AG
1793 INIT_LIST_HEAD(&hdev->adv_entries);
1794
db323f2f 1795 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
ab81cbf9 1796 INIT_WORK(&hdev->power_on, hci_power_on);
3243553f 1797 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
ab81cbf9 1798
16ab91ab
JH
1799 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1800
1da177e4
LT
1801 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1802
1803 atomic_set(&hdev->promisc, 0);
1804
28b75a89
AG
1805 INIT_WORK(&hdev->le_scan, le_scan_work);
1806
7ba8b4be
AG
1807 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1808
f20d09d5 1809 write_unlock(&hci_dev_list_lock);
1da177e4 1810
32845eb1
GP
1811 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1812 WQ_MEM_RECLAIM, 1);
33ca954d
DH
1813 if (!hdev->workqueue) {
1814 error = -ENOMEM;
1815 goto err;
1816 }
f48fd9c8 1817
33ca954d
DH
1818 error = hci_add_sysfs(hdev);
1819 if (error < 0)
1820 goto err_wqueue;
1da177e4 1821
611b30f7
MH
1822 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1823 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1824 if (hdev->rfkill) {
1825 if (rfkill_register(hdev->rfkill) < 0) {
1826 rfkill_destroy(hdev->rfkill);
1827 hdev->rfkill = NULL;
1828 }
1829 }
1830
a8b2d5c2
JH
1831 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1832 set_bit(HCI_SETUP, &hdev->dev_flags);
7f971041 1833 schedule_work(&hdev->power_on);
ab81cbf9 1834
1da177e4 1835 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 1836 hci_dev_hold(hdev);
1da177e4
LT
1837
1838 return id;
f48fd9c8 1839
33ca954d
DH
1840err_wqueue:
1841 destroy_workqueue(hdev->workqueue);
1842err:
f20d09d5 1843 write_lock(&hci_dev_list_lock);
f48fd9c8 1844 list_del(&hdev->list);
f20d09d5 1845 write_unlock(&hci_dev_list_lock);
f48fd9c8 1846
33ca954d 1847 return error;
1da177e4
LT
1848}
1849EXPORT_SYMBOL(hci_register_dev);
1850
1851/* Unregister HCI device */
59735631 1852void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 1853{
ef222013
MH
1854 int i;
1855
c13854ce 1856 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1857
94324962
JH
1858 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1859
f20d09d5 1860 write_lock(&hci_dev_list_lock);
1da177e4 1861 list_del(&hdev->list);
f20d09d5 1862 write_unlock(&hci_dev_list_lock);
1da177e4
LT
1863
1864 hci_dev_do_close(hdev);
1865
cd4c5391 1866 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1867 kfree_skb(hdev->reassembly[i]);
1868
ab81cbf9 1869 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8b2d5c2 1870 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 1871 hci_dev_lock(hdev);
744cf19e 1872 mgmt_index_removed(hdev);
09fd0de5 1873 hci_dev_unlock(hdev);
56e5cb86 1874 }
ab81cbf9 1875
2e58ef3e
JH
1876 /* mgmt_index_removed should take care of emptying the
1877 * pending list */
1878 BUG_ON(!list_empty(&hdev->mgmt_pending));
1879
1da177e4
LT
1880 hci_notify(hdev, HCI_DEV_UNREG);
1881
611b30f7
MH
1882 if (hdev->rfkill) {
1883 rfkill_unregister(hdev->rfkill);
1884 rfkill_destroy(hdev->rfkill);
1885 }
1886
ce242970 1887 hci_del_sysfs(hdev);
147e2d59 1888
db323f2f 1889 cancel_delayed_work_sync(&hdev->adv_work);
c6f3c5f7 1890
f48fd9c8
MH
1891 destroy_workqueue(hdev->workqueue);
1892
09fd0de5 1893 hci_dev_lock(hdev);
e2e0cacb 1894 hci_blacklist_clear(hdev);
2aeb9a1a 1895 hci_uuids_clear(hdev);
55ed8ca1 1896 hci_link_keys_clear(hdev);
b899efaf 1897 hci_smp_ltks_clear(hdev);
2763eda6 1898 hci_remote_oob_data_clear(hdev);
76c8686f 1899 hci_adv_entries_clear(hdev);
09fd0de5 1900 hci_dev_unlock(hdev);
e2e0cacb 1901
dc946bd8 1902 hci_dev_put(hdev);
1da177e4
LT
1903}
1904EXPORT_SYMBOL(hci_unregister_dev);
1905
1906/* Suspend HCI device */
1907int hci_suspend_dev(struct hci_dev *hdev)
1908{
1909 hci_notify(hdev, HCI_DEV_SUSPEND);
1910 return 0;
1911}
1912EXPORT_SYMBOL(hci_suspend_dev);
1913
1914/* Resume HCI device */
1915int hci_resume_dev(struct hci_dev *hdev)
1916{
1917 hci_notify(hdev, HCI_DEV_RESUME);
1918 return 0;
1919}
1920EXPORT_SYMBOL(hci_resume_dev);
1921
76bca880
MH
1922/* Receive frame from HCI drivers */
1923int hci_recv_frame(struct sk_buff *skb)
1924{
1925 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1926 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1927 && !test_bit(HCI_INIT, &hdev->flags))) {
1928 kfree_skb(skb);
1929 return -ENXIO;
1930 }
1931
1932 /* Incomming skb */
1933 bt_cb(skb)->incoming = 1;
1934
1935 /* Time stamp */
1936 __net_timestamp(skb);
1937
76bca880 1938 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 1939 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 1940
76bca880
MH
1941 return 0;
1942}
1943EXPORT_SYMBOL(hci_recv_frame);
1944
33e882a5 1945static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1e429f38 1946 int count, __u8 index)
33e882a5
SS
1947{
1948 int len = 0;
1949 int hlen = 0;
1950 int remain = count;
1951 struct sk_buff *skb;
1952 struct bt_skb_cb *scb;
1953
1954 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1955 index >= NUM_REASSEMBLY)
1956 return -EILSEQ;
1957
1958 skb = hdev->reassembly[index];
1959
1960 if (!skb) {
1961 switch (type) {
1962 case HCI_ACLDATA_PKT:
1963 len = HCI_MAX_FRAME_SIZE;
1964 hlen = HCI_ACL_HDR_SIZE;
1965 break;
1966 case HCI_EVENT_PKT:
1967 len = HCI_MAX_EVENT_SIZE;
1968 hlen = HCI_EVENT_HDR_SIZE;
1969 break;
1970 case HCI_SCODATA_PKT:
1971 len = HCI_MAX_SCO_SIZE;
1972 hlen = HCI_SCO_HDR_SIZE;
1973 break;
1974 }
1975
1e429f38 1976 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1977 if (!skb)
1978 return -ENOMEM;
1979
1980 scb = (void *) skb->cb;
1981 scb->expect = hlen;
1982 scb->pkt_type = type;
1983
1984 skb->dev = (void *) hdev;
1985 hdev->reassembly[index] = skb;
1986 }
1987
1988 while (count) {
1989 scb = (void *) skb->cb;
89bb46d0 1990 len = min_t(uint, scb->expect, count);
33e882a5
SS
1991
1992 memcpy(skb_put(skb, len), data, len);
1993
1994 count -= len;
1995 data += len;
1996 scb->expect -= len;
1997 remain = count;
1998
1999 switch (type) {
2000 case HCI_EVENT_PKT:
2001 if (skb->len == HCI_EVENT_HDR_SIZE) {
2002 struct hci_event_hdr *h = hci_event_hdr(skb);
2003 scb->expect = h->plen;
2004
2005 if (skb_tailroom(skb) < scb->expect) {
2006 kfree_skb(skb);
2007 hdev->reassembly[index] = NULL;
2008 return -ENOMEM;
2009 }
2010 }
2011 break;
2012
2013 case HCI_ACLDATA_PKT:
2014 if (skb->len == HCI_ACL_HDR_SIZE) {
2015 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2016 scb->expect = __le16_to_cpu(h->dlen);
2017
2018 if (skb_tailroom(skb) < scb->expect) {
2019 kfree_skb(skb);
2020 hdev->reassembly[index] = NULL;
2021 return -ENOMEM;
2022 }
2023 }
2024 break;
2025
2026 case HCI_SCODATA_PKT:
2027 if (skb->len == HCI_SCO_HDR_SIZE) {
2028 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2029 scb->expect = h->dlen;
2030
2031 if (skb_tailroom(skb) < scb->expect) {
2032 kfree_skb(skb);
2033 hdev->reassembly[index] = NULL;
2034 return -ENOMEM;
2035 }
2036 }
2037 break;
2038 }
2039
2040 if (scb->expect == 0) {
2041 /* Complete frame */
2042
2043 bt_cb(skb)->pkt_type = type;
2044 hci_recv_frame(skb);
2045
2046 hdev->reassembly[index] = NULL;
2047 return remain;
2048 }
2049 }
2050
2051 return remain;
2052}
2053
ef222013
MH
2054int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2055{
f39a3c06
SS
2056 int rem = 0;
2057
ef222013
MH
2058 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2059 return -EILSEQ;
2060
da5f6c37 2061 while (count) {
1e429f38 2062 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
2063 if (rem < 0)
2064 return rem;
ef222013 2065
f39a3c06
SS
2066 data += (count - rem);
2067 count = rem;
f81c6224 2068 }
ef222013 2069
f39a3c06 2070 return rem;
ef222013
MH
2071}
2072EXPORT_SYMBOL(hci_recv_fragment);
2073
99811510
SS
2074#define STREAM_REASSEMBLY 0
2075
2076int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2077{
2078 int type;
2079 int rem = 0;
2080
da5f6c37 2081 while (count) {
99811510
SS
2082 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2083
2084 if (!skb) {
2085 struct { char type; } *pkt;
2086
2087 /* Start of the frame */
2088 pkt = data;
2089 type = pkt->type;
2090
2091 data++;
2092 count--;
2093 } else
2094 type = bt_cb(skb)->pkt_type;
2095
1e429f38
GP
2096 rem = hci_reassembly(hdev, type, data, count,
2097 STREAM_REASSEMBLY);
99811510
SS
2098 if (rem < 0)
2099 return rem;
2100
2101 data += (count - rem);
2102 count = rem;
f81c6224 2103 }
99811510
SS
2104
2105 return rem;
2106}
2107EXPORT_SYMBOL(hci_recv_stream_fragment);
2108
1da177e4
LT
2109/* ---- Interface to upper protocols ---- */
2110
1da177e4
LT
2111int hci_register_cb(struct hci_cb *cb)
2112{
2113 BT_DBG("%p name %s", cb, cb->name);
2114
f20d09d5 2115 write_lock(&hci_cb_list_lock);
1da177e4 2116 list_add(&cb->list, &hci_cb_list);
f20d09d5 2117 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2118
2119 return 0;
2120}
2121EXPORT_SYMBOL(hci_register_cb);
2122
2123int hci_unregister_cb(struct hci_cb *cb)
2124{
2125 BT_DBG("%p name %s", cb, cb->name);
2126
f20d09d5 2127 write_lock(&hci_cb_list_lock);
1da177e4 2128 list_del(&cb->list);
f20d09d5 2129 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2130
2131 return 0;
2132}
2133EXPORT_SYMBOL(hci_unregister_cb);
2134
2135static int hci_send_frame(struct sk_buff *skb)
2136{
2137 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2138
2139 if (!hdev) {
2140 kfree_skb(skb);
2141 return -ENODEV;
2142 }
2143
0d48d939 2144 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 2145
cd82e61c
MH
2146 /* Time stamp */
2147 __net_timestamp(skb);
1da177e4 2148
cd82e61c
MH
2149 /* Send copy to monitor */
2150 hci_send_to_monitor(hdev, skb);
2151
2152 if (atomic_read(&hdev->promisc)) {
2153 /* Send copy to the sockets */
470fe1b5 2154 hci_send_to_sock(hdev, skb);
1da177e4
LT
2155 }
2156
2157 /* Get rid of skb owner, prior to sending to the driver. */
2158 skb_orphan(skb);
2159
2160 return hdev->send(skb);
2161}
2162
2163/* Send HCI command */
a9de9248 2164int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1da177e4
LT
2165{
2166 int len = HCI_COMMAND_HDR_SIZE + plen;
2167 struct hci_command_hdr *hdr;
2168 struct sk_buff *skb;
2169
a9de9248 2170 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1da177e4
LT
2171
2172 skb = bt_skb_alloc(len, GFP_ATOMIC);
2173 if (!skb) {
ef222013 2174 BT_ERR("%s no memory for command", hdev->name);
1da177e4
LT
2175 return -ENOMEM;
2176 }
2177
2178 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2179 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
2180 hdr->plen = plen;
2181
2182 if (plen)
2183 memcpy(skb_put(skb, plen), param, plen);
2184
2185 BT_DBG("skb len %d", skb->len);
2186
0d48d939 2187 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 2188 skb->dev = (void *) hdev;
c78ae283 2189
a5040efa
JH
2190 if (test_bit(HCI_INIT, &hdev->flags))
2191 hdev->init_last_cmd = opcode;
2192
1da177e4 2193 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 2194 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2195
2196 return 0;
2197}
1da177e4
LT
2198
2199/* Get data from the previously sent command */
a9de9248 2200void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2201{
2202 struct hci_command_hdr *hdr;
2203
2204 if (!hdev->sent_cmd)
2205 return NULL;
2206
2207 hdr = (void *) hdev->sent_cmd->data;
2208
a9de9248 2209 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2210 return NULL;
2211
a9de9248 2212 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
2213
2214 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2215}
2216
2217/* Send ACL data */
2218static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2219{
2220 struct hci_acl_hdr *hdr;
2221 int len = skb->len;
2222
badff6d0
ACM
2223 skb_push(skb, HCI_ACL_HDR_SIZE);
2224 skb_reset_transport_header(skb);
9c70220b 2225 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2226 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2227 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2228}
2229
73d80deb
LAD
2230static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2231 struct sk_buff *skb, __u16 flags)
1da177e4
LT
2232{
2233 struct hci_dev *hdev = conn->hdev;
2234 struct sk_buff *list;
2235
70f23020
AE
2236 list = skb_shinfo(skb)->frag_list;
2237 if (!list) {
1da177e4
LT
2238 /* Non fragmented */
2239 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2240
73d80deb 2241 skb_queue_tail(queue, skb);
1da177e4
LT
2242 } else {
2243 /* Fragmented */
2244 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2245
2246 skb_shinfo(skb)->frag_list = NULL;
2247
2248 /* Queue all fragments atomically */
af3e6359 2249 spin_lock(&queue->lock);
1da177e4 2250
73d80deb 2251 __skb_queue_tail(queue, skb);
e702112f
AE
2252
2253 flags &= ~ACL_START;
2254 flags |= ACL_CONT;
1da177e4
LT
2255 do {
2256 skb = list; list = list->next;
8e87d142 2257
1da177e4 2258 skb->dev = (void *) hdev;
0d48d939 2259 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2260 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2261
2262 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2263
73d80deb 2264 __skb_queue_tail(queue, skb);
1da177e4
LT
2265 } while (list);
2266
af3e6359 2267 spin_unlock(&queue->lock);
1da177e4 2268 }
73d80deb
LAD
2269}
2270
2271void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2272{
2273 struct hci_conn *conn = chan->conn;
2274 struct hci_dev *hdev = conn->hdev;
2275
2276 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2277
2278 skb->dev = (void *) hdev;
2279 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2280 hci_add_acl_hdr(skb, conn->handle, flags);
2281
2282 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2283
3eff45ea 2284 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2285}
2286EXPORT_SYMBOL(hci_send_acl);
2287
2288/* Send SCO data */
0d861d8b 2289void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2290{
2291 struct hci_dev *hdev = conn->hdev;
2292 struct hci_sco_hdr hdr;
2293
2294 BT_DBG("%s len %d", hdev->name, skb->len);
2295
aca3192c 2296 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2297 hdr.dlen = skb->len;
2298
badff6d0
ACM
2299 skb_push(skb, HCI_SCO_HDR_SIZE);
2300 skb_reset_transport_header(skb);
9c70220b 2301 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2302
2303 skb->dev = (void *) hdev;
0d48d939 2304 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2305
1da177e4 2306 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2307 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2308}
2309EXPORT_SYMBOL(hci_send_sco);
2310
2311/* ---- HCI TX task (outgoing data) ---- */
2312
/* HCI Connection scheduler */
/* Pick the connection of link @type that should transmit next: among
 * connections in BT_CONNECTED/BT_CONFIG state with queued data, the one
 * with the fewest packets in flight (c->sent) wins.  *quote receives
 * the number of packets that connection may send this round — a fair
 * share of the matching controller buffer count, at least 1 — or 0 when
 * no connection is ready. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Skip other link types and idle connections */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy eligible connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once as many eligible connections as exist of
		 * this type have been seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Controller buffer budget depends on the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* le_mtu == 0: LE traffic shares the ACL buffers */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share between contenders, minimum one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2372
bae1f5d9 2373static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2374{
2375 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2376 struct hci_conn *c;
1da177e4 2377
bae1f5d9 2378 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2379
bf4c6325
GP
2380 rcu_read_lock();
2381
1da177e4 2382 /* Kill stalled connections */
bf4c6325 2383 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2384 if (c->type == type && c->sent) {
2385 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2386 hdev->name, batostr(&c->dst));
2387 hci_acl_disconn(c, 0x13);
2388 }
2389 }
bf4c6325
GP
2390
2391 rcu_read_unlock();
1da177e4
LT
2392}
2393
/* Pick the best hci_chan of link @type to transmit from next and
 * compute its fair-share quota in *@quote.
 *
 * Only channels whose head skb carries the highest priority currently
 * waiting are considered; among those, the channel on the least-busy
 * connection (smallest conn->sent) wins.  Returns NULL (leaving *quote
 * untouched) when nothing is queued. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters; lower
			 * priority channels are skipped outright. */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* Found a higher priority level: restart
				 * the selection at that level. */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Prefer the channel on the least-busy connection */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type have been visited */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Controller buffer budget depends on the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* le_mtu == 0: LE traffic shares the ACL buffers */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share between contending channels, minimum one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2472
/* Priority aging for @type links, run after a scheduling round in which
 * something was transmitted.  Channels that sent data get their per-round
 * counter reset; channels that were starved get the head skb of their
 * queue promoted to HCI_PRIO_MAX - 1 so they win the next round. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round: clear its counter
			 * and leave its priority alone. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at the top aging level: nothing to do */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: promote its head packet */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* All connections of this type have been visited */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2522
b71d385a
AE
2523static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2524{
2525 /* Calculate count of blocks used by this packet */
2526 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2527}
2528
63d2bc1b 2529static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2530{
1da177e4
LT
2531 if (!test_bit(HCI_RAW, &hdev->flags)) {
2532 /* ACL tx timeout must be longer than maximum
2533 * link supervision timeout (40.9 seconds) */
63d2bc1b 2534 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
cc48dc0a 2535 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
bae1f5d9 2536 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2537 }
63d2bc1b 2538}
1da177e4 2539
/* Packet-based ACL scheduler: drain channel queues while the controller
 * still advertises free ACL buffers (hdev->acl_cnt), honouring the
 * per-channel quota computed by hci_chan_sent(). */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a stalled link before trying to send more */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the channel's head packet when selected */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
				skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
					bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed per packet */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything went out, age the priorities of starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2577
b71d385a
AE
2578static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2579{
63d2bc1b 2580 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2581 struct hci_chan *chan;
2582 struct sk_buff *skb;
2583 int quote;
b71d385a 2584
63d2bc1b 2585 __check_timeout(hdev, cnt);
b71d385a
AE
2586
2587 while (hdev->block_cnt > 0 &&
2588 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2589 u32 priority = (skb_peek(&chan->data_q))->priority;
2590 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2591 int blocks;
2592
2593 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2594 skb->len, skb->priority);
2595
2596 /* Stop if priority has changed */
2597 if (skb->priority < priority)
2598 break;
2599
2600 skb = skb_dequeue(&chan->data_q);
2601
2602 blocks = __get_blocks(hdev, skb);
2603 if (blocks > hdev->block_cnt)
2604 return;
2605
2606 hci_conn_enter_active_mode(chan->conn,
2607 bt_cb(skb)->force_active);
2608
2609 hci_send_frame(skb);
2610 hdev->acl_last_tx = jiffies;
2611
2612 hdev->block_cnt -= blocks;
2613 quote -= blocks;
2614
2615 chan->sent += blocks;
2616 chan->conn->sent += blocks;
2617 }
2618 }
2619
2620 if (cnt != hdev->block_cnt)
2621 hci_prio_recalculate(hdev, ACL_LINK);
2622}
2623
2624static inline void hci_sched_acl(struct hci_dev *hdev)
2625{
2626 BT_DBG("%s", hdev->name);
2627
2628 if (!hci_conn_num(hdev, ACL_LINK))
2629 return;
2630
2631 switch (hdev->flow_ctl_mode) {
2632 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2633 hci_sched_acl_pkt(hdev);
2634 break;
2635
2636 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2637 hci_sched_acl_blk(hdev);
2638 break;
2639 }
2640}
2641
1da177e4
LT
2642/* Schedule SCO */
2643static inline void hci_sched_sco(struct hci_dev *hdev)
2644{
2645 struct hci_conn *conn;
2646 struct sk_buff *skb;
2647 int quote;
2648
2649 BT_DBG("%s", hdev->name);
2650
52087a79
LAD
2651 if (!hci_conn_num(hdev, SCO_LINK))
2652 return;
2653
1da177e4
LT
2654 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2655 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2656 BT_DBG("skb %p len %d", skb, skb->len);
2657 hci_send_frame(skb);
2658
2659 conn->sent++;
2660 if (conn->sent == ~0)
2661 conn->sent = 0;
2662 }
2663 }
2664}
2665
b6a0dc82
MH
2666static inline void hci_sched_esco(struct hci_dev *hdev)
2667{
2668 struct hci_conn *conn;
2669 struct sk_buff *skb;
2670 int quote;
2671
2672 BT_DBG("%s", hdev->name);
2673
52087a79
LAD
2674 if (!hci_conn_num(hdev, ESCO_LINK))
2675 return;
2676
b6a0dc82
MH
2677 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2678 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2679 BT_DBG("skb %p len %d", skb, skb->len);
2680 hci_send_frame(skb);
2681
2682 conn->sent++;
2683 if (conn->sent == ~0)
2684 conn->sent = 0;
2685 }
2686 }
2687}
2688
/* LE scheduler: like the ACL packet scheduler, but draws from the LE
 * buffer pool when the controller has one (le_pkts != 0) and from the
 * shared ACL pool otherwise; the drained count is written back to the
 * pool that was used. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Dedicated LE buffers when available, otherwise the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember the starting budget */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the channel's head packet when selected */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
				skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the remaining budget to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything went out, age the priorities of starved channels */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2739
/* TX work item: run each link-type scheduler in turn, then flush any
 * raw-queued frames straight to the driver. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2762
25985edc 2763/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2764
2765/* ACL data packet */
2766static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2767{
2768 struct hci_acl_hdr *hdr = (void *) skb->data;
2769 struct hci_conn *conn;
2770 __u16 handle, flags;
2771
2772 skb_pull(skb, HCI_ACL_HDR_SIZE);
2773
2774 handle = __le16_to_cpu(hdr->handle);
2775 flags = hci_flags(handle);
2776 handle = hci_handle(handle);
2777
2778 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2779
2780 hdev->stat.acl_rx++;
2781
2782 hci_dev_lock(hdev);
2783 conn = hci_conn_hash_lookup_handle(hdev, handle);
2784 hci_dev_unlock(hdev);
8e87d142 2785
1da177e4 2786 if (conn) {
65983fc7 2787 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2788
1da177e4 2789 /* Send to upper protocol */
686ebf28
UF
2790 l2cap_recv_acldata(conn, skb, flags);
2791 return;
1da177e4 2792 } else {
8e87d142 2793 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2794 hdev->name, handle);
2795 }
2796
2797 kfree_skb(skb);
2798}
2799
2800/* SCO data packet */
2801static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2802{
2803 struct hci_sco_hdr *hdr = (void *) skb->data;
2804 struct hci_conn *conn;
2805 __u16 handle;
2806
2807 skb_pull(skb, HCI_SCO_HDR_SIZE);
2808
2809 handle = __le16_to_cpu(hdr->handle);
2810
2811 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2812
2813 hdev->stat.sco_rx++;
2814
2815 hci_dev_lock(hdev);
2816 conn = hci_conn_hash_lookup_handle(hdev, handle);
2817 hci_dev_unlock(hdev);
2818
2819 if (conn) {
1da177e4 2820 /* Send to upper protocol */
686ebf28
UF
2821 sco_recv_scodata(conn, skb);
2822 return;
1da177e4 2823 } else {
8e87d142 2824 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2825 hdev->name, handle);
2826 }
2827
2828 kfree_skb(skb);
2829}
2830
/* RX work item: drain hdev->rx_q, mirroring each frame to the monitor
 * (and to raw sockets in promiscuous mode) before dispatching it by
 * packet type to the event/ACL/SCO handlers. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: the stack does not process frames at all */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states;
			 * events must still go through for init to finish. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: nothing can consume it */
			kfree_skb(skb);
			break;
		}
	}
}
2885
/* CMD work item: when the controller has command credit (cmd_cnt > 0),
 * take the next command off cmd_q, keep a clone in hdev->sent_cmd
 * (presumably for matching the command's completion event — confirm
 * against the event handlers) and send the original. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Release the previously stored command, if any */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During reset the command timeout is disarmed;
			 * otherwise (re)arm it for this command. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry
			 * from the workqueue. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
2916
2917int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2918{
2919 /* General inquiry access code (GIAC) */
2920 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2921 struct hci_cp_inquiry cp;
2922
2923 BT_DBG("%s", hdev->name);
2924
2925 if (test_bit(HCI_INQUIRY, &hdev->flags))
2926 return -EINPROGRESS;
2927
4663262c
JH
2928 inquiry_cache_flush(hdev);
2929
2519a1fc
AG
2930 memset(&cp, 0, sizeof(cp));
2931 memcpy(&cp.lap, lap, sizeof(cp.lap));
2932 cp.length = length;
2933
2934 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2935}
023d5049
AG
2936
/* Abort a running inquiry.
 * Returns -EPERM when no inquiry is in progress, otherwise the result
 * of queueing the HCI_OP_INQUIRY_CANCEL command. */
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}