/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
/* Work handlers for the RX, command and TX paths; defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Forward a device-level event (e.g. up/down, register/unregister) to the
 * HCI socket layer so monitoring sockets can see it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
/* Completion callback for synchronous requests: store the controller status
 * and wake the task sleeping in __hci_req_sync()/__hci_cmd_sync_ev().
 * Only acts if a request is actually pending.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
70
/* Abort a pending synchronous request with error @err and wake the waiter.
 * The waiter sees HCI_REQ_CANCELED and returns -req_result.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
81
77a63e0a
FW
/* Retrieve and validate the event skb stashed in hdev->recv_evt for a
 * synchronous command.
 *
 * If @event is non-zero, the caller waits for that specific event; otherwise
 * a Command Complete for @opcode is expected. Returns the skb with headers
 * pulled on success (ownership passes to the caller), or ERR_PTR(-ENODATA)
 * when no/short/mismatched event was received. The skb is consumed on error.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Take ownership of the stashed event under the dev lock so the
	 * RX path cannot hand it out twice.
	 */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event rather than Command Complete */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
136
/* Send a single HCI command and sleep until it completes, a signal arrives,
 * or @timeout (in jiffies) expires.
 *
 * @event: if non-zero, wait for this specific event instead of Command
 *         Complete for @opcode.
 *
 * Returns the response skb (caller must free) or an ERR_PTR. Must be called
 * with the request lock held (callers serialize via hci_req_lock).
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status is translated to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
/* Convenience wrapper around __hci_cmd_sync_ev() for the common case of
 * waiting for the Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
197
/* Execute request and wait for completion.
 *
 * @func builds the request (possibly queuing several commands); this
 * function then runs it and sleeps until hci_req_sync_complete() fires,
 * a signal arrives, or @timeout (jiffies) expires. Returns 0 or a
 * negative errno. Caller must hold the request lock.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status is translated to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
261
/* Locked variant of __hci_req_sync(): refuses work on a device that is not
 * up and serializes all synchronous requests via the request lock.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
279
/* Request builder: queue an HCI Reset and flag the device as resetting so
 * the event path knows a reset is in flight.
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
288
/* Stage-1 init for BR/EDR (and dual-mode) controllers: packet-based flow
 * control plus the basic identity reads.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
302
/* Stage-1 init for AMP controllers: block-based flow control and the
 * AMP-specific capability reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
328
/* First stage of controller init: optional reset followed by the
 * device-type-specific basic setup (BR/EDR vs AMP).
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
353
/* Stage-2 BR/EDR setup: read buffer sizes and identity info, clear event
 * filters, set the connection accept timeout, and (where supported) read
 * the page scan parameters.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
395
/* Stage-2 LE setup: read the LE-specific buffer/feature/state information
 * and implicitly enable LE on LE-only controllers.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
419
420static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
421{
422 if (lmp_ext_inq_capable(hdev))
423 return 0x02;
424
425 if (lmp_inq_rssi_capable(hdev))
426 return 0x01;
427
428 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
429 hdev->lmp_subver == 0x0757)
430 return 0x01;
431
432 if (hdev->manufacturer == 15) {
433 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
434 return 0x01;
435 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
436 return 0x01;
437 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
438 return 0x01;
439 }
440
441 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
442 hdev->lmp_subver == 0x1805)
443 return 0x01;
444
445 return 0x00;
446}
447
/* Queue a Write Inquiry Mode command using the best mode the controller
 * supports (see hci_get_inquiry_mode()).
 */
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
456
/* Build the Set Event Mask command, enabling only events the controller's
 * feature set can actually generate, plus the LE event mask when LE is
 * supported.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
537
/* Second stage of controller init: transport-specific setup (BR/EDR, LE),
 * event mask, and feature-conditional configuration (SSP/EIR, inquiry mode,
 * extended features, link-level security).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP disabled: clear any stale EIR data */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
593
/* Queue a Write Default Link Policy command enabling every policy the
 * controller's LMP features advertise (role switch, hold, sniff, park).
 */
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
612
/* Queue a Write LE Host Supported command reflecting the current
 * HCI_LE_ENABLED flag, but only when the host-side setting would actually
 * change. LE-only controllers are skipped (no explicit enablement).
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only send when the setting differs from the controller state */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
633
/* Build the page-2 event mask, enabling the Connectionless Slave Broadcast
 * events for whichever CSB roles (master/slave) the controller supports.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
661
/* Third stage of controller init: commands that depend on the feature and
 * command masks read in earlier stages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
700
/* Fourth stage of controller init: page-2 event mask and synchronization
 * train parameters, each gated on the controller advertising support.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}
713
/* Run the staged controller initialization. AMP controllers stop after
 * stage 1; BR/EDR/LE controllers run all four stages. Returns 0 or the
 * first stage's negative errno.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}
739
/* Request builder: write the scan enable setting (@opt carries the
 * inquiry/page scan bits).
 */
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
749
/* Request builder: write the authentication enable setting (@opt is the
 * on/off value).
 */
static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
759
/* Request builder: write the encryption mode setting (@opt is the
 * on/off value).
 */
static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
769
/* Request builder: write the default link policy (@opt carries the policy
 * bits, converted to little endian on the wire).
 */
static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
779
/* Get HCI device by index.
 * Device is held on return; caller must drop the reference with
 * hci_dev_put(). Returns NULL if no device with @index exists.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
1da177e4
LT
801
802/* ---- Inquiry support ---- */
ff9ef578 803
30dc78e1
JH
804bool hci_discovery_active(struct hci_dev *hdev)
805{
806 struct discovery_state *discov = &hdev->discovery;
807
6fbe195d 808 switch (discov->state) {
343f935b 809 case DISCOVERY_FINDING:
6fbe195d 810 case DISCOVERY_RESOLVING:
30dc78e1
JH
811 return true;
812
6fbe195d
AG
813 default:
814 return false;
815 }
30dc78e1
JH
816}
817
/* Transition the discovery state machine and emit mgmt "discovering"
 * events on the transitions userspace cares about. A no-op if the state
 * is unchanged.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never began, so no
		 * "stopped discovering" event is sent in that case.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
843
/* Free every entry in the inquiry cache and reset the unknown/resolve
 * sub-lists. Caller must hold the hdev lock.
 */
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
857
/* Look up an inquiry cache entry by Bluetooth address across all entries.
 * Returns the entry or NULL. Caller must hold the hdev lock.
 */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
873
/* Look up a cache entry by address, but only among entries whose remote
 * name is still unknown. Returns the entry or NULL. Caller must hold the
 * hdev lock.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
889
30dc78e1 890struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
891 bdaddr_t *bdaddr,
892 int state)
30dc78e1
JH
893{
894 struct discovery_state *cache = &hdev->discovery;
895 struct inquiry_entry *e;
896
6ed93dc6 897 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
898
899 list_for_each_entry(e, &cache->resolve, list) {
900 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
901 return e;
902 if (!bacmp(&e->data.bdaddr, bdaddr))
903 return e;
904 }
905
906 return NULL;
907}
908
/* Re-insert @ie into the resolve list keeping it sorted by descending RSSI
 * magnitude (strongest signal first), while never moving it ahead of
 * entries whose name resolution is already pending. Caller must hold the
 * hdev lock.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
927
/* Insert or refresh an inquiry cache entry for @data.
 *
 * @name_known: whether the remote name is already known for this result.
 * @ssp: out parameter, set to true when the remote indicated SSP support
 *       (either in this result or a previous cached one); may be NULL.
 *
 * Returns true if the entry's name is known (i.e. name resolution is not
 * needed), false otherwise or on allocation failure. Caller must hold the
 * hdev lock.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while a name lookup is still needed: keep
		 * the resolve list ordered by signal strength.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from the unknown/resolve
	 * bookkeeping (unless a lookup is already pending).
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
985
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of entries copied. Caller must
 * hold the hdev lock and provide a buffer of at least num entries.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
1013
/* Request builder: start an Inquiry with the LAP/length/num_rsp parameters
 * passed via @opt (a struct hci_inquiry_req pointer). Does nothing if an
 * inquiry is already in progress.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
1031
/* wait_on_bit() action: sleep until woken, reporting whether a signal
 * interrupted the wait (non-zero aborts the wait).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1037
1da177e4
LT
1038int hci_inquiry(void __user *arg)
1039{
1040 __u8 __user *ptr = arg;
1041 struct hci_inquiry_req ir;
1042 struct hci_dev *hdev;
1043 int err = 0, do_inquiry = 0, max_rsp;
1044 long timeo;
1045 __u8 *buf;
1046
1047 if (copy_from_user(&ir, ptr, sizeof(ir)))
1048 return -EFAULT;
1049
5a08ecce
AE
1050 hdev = hci_dev_get(ir.dev_id);
1051 if (!hdev)
1da177e4
LT
1052 return -ENODEV;
1053
0736cfa8
MH
1054 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1055 err = -EBUSY;
1056 goto done;
1057 }
1058
5b69bef5
MH
1059 if (hdev->dev_type != HCI_BREDR) {
1060 err = -EOPNOTSUPP;
1061 goto done;
1062 }
1063
56f87901
JH
1064 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1065 err = -EOPNOTSUPP;
1066 goto done;
1067 }
1068
09fd0de5 1069 hci_dev_lock(hdev);
8e87d142 1070 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1071 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1072 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1073 do_inquiry = 1;
1074 }
09fd0de5 1075 hci_dev_unlock(hdev);
1da177e4 1076
04837f64 1077 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1078
1079 if (do_inquiry) {
01178cd4
JH
1080 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1081 timeo);
70f23020
AE
1082 if (err < 0)
1083 goto done;
3e13fa1e
AG
1084
1085 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1086 * cleared). If it is interrupted by a signal, return -EINTR.
1087 */
1088 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1089 TASK_INTERRUPTIBLE))
1090 return -EINTR;
70f23020 1091 }
1da177e4 1092
8fc9ced3
GP
1093 /* for unlimited number of responses we will use buffer with
1094 * 255 entries
1095 */
1da177e4
LT
1096 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1097
1098 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1099 * copy it to the user space.
1100 */
01df8c31 1101 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1102 if (!buf) {
1da177e4
LT
1103 err = -ENOMEM;
1104 goto done;
1105 }
1106
09fd0de5 1107 hci_dev_lock(hdev);
1da177e4 1108 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1109 hci_dev_unlock(hdev);
1da177e4
LT
1110
1111 BT_DBG("num_rsp %d", ir.num_rsp);
1112
1113 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1114 ptr += sizeof(ir);
1115 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1116 ir.num_rsp))
1da177e4 1117 err = -EFAULT;
8e87d142 1118 } else
1da177e4
LT
1119 err = -EFAULT;
1120
1121 kfree(buf);
1122
1123done:
1124 hci_dev_put(hdev);
1125 return err;
1126}
1127
/* Bring the controller up: driver open, optional vendor setup, HCI
 * init sequence, and power notification. On any init failure all
 * queues/works are drained and the driver is closed again.
 *
 * Returns 0 on success or a negative errno. Serialized by the request
 * lock.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered; refuse to power it up */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Transport/driver open callback */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Vendor-specific setup runs only once, during first-time setup */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Skip the HCI init sequence for raw devices and user
		 * channel (the owner drives the device directly).
		 */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
1229
cbed0ca1
JH
1230/* ---- HCI ioctl helpers ---- */
1231
/* ioctl entry point for HCIDEVUP: power up the device identified by
 * @dev. Returns 0 or a negative errno from hci_dev_do_open().
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
1261
/* Bring the controller down: cancel deferred work, drain queues and
 * works, optionally reset the controller, close the driver and clear
 * runtime state. Ordering matters: works are flushed before queues
 * are purged, and the command timer is stopped before the last sent
 * command is freed.
 *
 * Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1358
1359int hci_dev_close(__u16 dev)
1360{
1361 struct hci_dev *hdev;
1362 int err;
1363
70f23020
AE
1364 hdev = hci_dev_get(dev);
1365 if (!hdev)
1da177e4 1366 return -ENODEV;
8ee56540 1367
0736cfa8
MH
1368 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1369 err = -EBUSY;
1370 goto done;
1371 }
1372
8ee56540
MH
1373 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1374 cancel_delayed_work(&hdev->power_off);
1375
1da177e4 1376 err = hci_dev_do_close(hdev);
8ee56540 1377
0736cfa8 1378done:
1da177e4
LT
1379 hci_dev_put(hdev);
1380 return err;
1381}
1382
/* ioctl entry point for HCIDEVRESET: drop all pending traffic,
 * flush caches/connections and issue an HCI Reset to the controller
 * (unless it is a raw device). Returns 0 or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters to "one command slot available" */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1427
1428int hci_dev_reset_stat(__u16 dev)
1429{
1430 struct hci_dev *hdev;
1431 int ret = 0;
1432
70f23020
AE
1433 hdev = hci_dev_get(dev);
1434 if (!hdev)
1da177e4
LT
1435 return -ENODEV;
1436
0736cfa8
MH
1437 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1438 ret = -EBUSY;
1439 goto done;
1440 }
1441
1da177e4
LT
1442 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1443
0736cfa8 1444done:
1da177e4 1445 hci_dev_put(hdev);
1da177e4
LT
1446 return ret;
1447}
1448
/* Dispatch the HCISET* configuration ioctls for a BR/EDR controller.
 * Some settings trigger a synchronous HCI request, others only update
 * local fields on hdev. Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* These settings only apply to BR/EDR controllers */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Local-only setting; no HCI command needed */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 values (mtu and packet count);
		 * the +1/+0 halfword split is host-endianness dependent —
		 * matches the historical hcitool encoding. TODO confirm.
		 */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1539
/* ioctl entry point for HCIGETDEVLIST: copy the (id, flags) pairs of
 * up to dev_num registered devices to user space.
 *
 * Side effects while walking the list: a pending auto-power-off is
 * cancelled, and devices not managed through mgmt are marked pairable.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation derived from the user-supplied count */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the number of devices actually found */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1586
/* ioctl entry point for HCIGETDEVINFO: fill a struct hci_dev_info
 * snapshot for one device and copy it to user space.
 *
 * For LE-only controllers the ACL fields report the LE buffer values
 * and the SCO fields are zeroed.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying info cancels a pending auto-power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Low nibble: bus type; bits 4-5: device type */
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1635
1636/* ---- Interface to HCI drivers ---- */
1637
611b30f7
MH
1638static int hci_rfkill_set_block(void *data, bool blocked)
1639{
1640 struct hci_dev *hdev = data;
1641
1642 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1643
0736cfa8
MH
1644 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1645 return -EBUSY;
1646
5e130367
JH
1647 if (blocked) {
1648 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
1649 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1650 hci_dev_do_close(hdev);
5e130367
JH
1651 } else {
1652 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 1653 }
611b30f7
MH
1654
1655 return 0;
1656}
1657
/* rfkill operations table: only the set_block hook is needed */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1661
/* Deferred power-on handler (hdev->power_on work): open the device,
 * re-check conditions that were deliberately ignored during setup,
 * and schedule auto-power-off or announce the new index to mgmt.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Auto-powered devices switch off again after a timeout
		 * unless something keeps them up.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1693
/* Deferred power-off handler (hdev->power_off delayed work): simply
 * closes the device.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1703
/* Deferred discoverable-timeout handler: turn off inquiry scan by
 * writing SCAN_PAGE (page scan only) and reset the stored timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Fire-and-forget request; no completion callback needed */
	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1724
2aeb9a1a
JH
1725int hci_uuids_clear(struct hci_dev *hdev)
1726{
4821002c 1727 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1728
4821002c
JH
1729 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1730 list_del(&uuid->list);
2aeb9a1a
JH
1731 kfree(uuid);
1732 }
1733
1734 return 0;
1735}
1736
55ed8ca1
JH
1737int hci_link_keys_clear(struct hci_dev *hdev)
1738{
1739 struct list_head *p, *n;
1740
1741 list_for_each_safe(p, n, &hdev->link_keys) {
1742 struct link_key *key;
1743
1744 key = list_entry(p, struct link_key, list);
1745
1746 list_del(p);
1747 kfree(key);
1748 }
1749
1750 return 0;
1751}
1752
b899efaf
VCG
1753int hci_smp_ltks_clear(struct hci_dev *hdev)
1754{
1755 struct smp_ltk *k, *tmp;
1756
1757 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1758 list_del(&k->list);
1759 kfree(k);
1760 }
1761
1762 return 0;
1763}
1764
55ed8ca1
JH
1765struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1766{
8035ded4 1767 struct link_key *k;
55ed8ca1 1768
8035ded4 1769 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1770 if (bacmp(bdaddr, &k->bdaddr) == 0)
1771 return k;
55ed8ca1
JH
1772
1773 return NULL;
1774}
1775
/* Decide whether a newly created BR/EDR link key should be stored
 * persistently. The checks are ordered from "always keep" (legacy
 * keys) through "never keep" (debug keys) to bonding-requirement
 * heuristics based on the local and remote authentication
 * requirements of @conn.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1811
c9839a11 1812struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1813{
c9839a11 1814 struct smp_ltk *k;
75d262c2 1815
c9839a11
VCG
1816 list_for_each_entry(k, &hdev->long_term_keys, list) {
1817 if (k->ediv != ediv ||
a8c5fb1a 1818 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1819 continue;
1820
c9839a11 1821 return k;
75d262c2
VCG
1822 }
1823
1824 return NULL;
1825}
75d262c2 1826
c9839a11 1827struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1828 u8 addr_type)
75d262c2 1829{
c9839a11 1830 struct smp_ltk *k;
75d262c2 1831
c9839a11
VCG
1832 list_for_each_entry(k, &hdev->long_term_keys, list)
1833 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1834 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1835 return k;
1836
1837 return NULL;
1838}
75d262c2 1839
/* Store (or update) a BR/EDR link key for @bdaddr and, for new keys,
 * notify mgmt and record whether the key should be flushed when the
 * connection goes down (non-persistent keys).
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		/* Update in place, remembering the previous type */
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1892
/* Store (or update) an LE key for @bdaddr/@addr_type. Only STK and
 * LTK key types are accepted; anything else is silently ignored.
 * New LTKs (but not STKs) are reported to mgmt.
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this peer if present */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1929
55ed8ca1
JH
1930int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1931{
1932 struct link_key *key;
1933
1934 key = hci_find_link_key(hdev, bdaddr);
1935 if (!key)
1936 return -ENOENT;
1937
6ed93dc6 1938 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1939
1940 list_del(&key->list);
1941 kfree(key);
1942
1943 return 0;
1944}
1945
b899efaf
VCG
1946int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1947{
1948 struct smp_ltk *k, *tmp;
1949
1950 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1951 if (bacmp(bdaddr, &k->bdaddr))
1952 continue;
1953
6ed93dc6 1954 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1955
1956 list_del(&k->list);
1957 kfree(k);
1958 }
1959
1960 return 0;
1961}
1962
/* HCI command timer function: fires when the controller failed to
 * answer the last command in time. Logs the stalled opcode (if any)
 * and frees the command slot so the next queued command can be sent.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Pretend the slot is free again and kick the command work */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1980
2763eda6 1981struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1982 bdaddr_t *bdaddr)
2763eda6
SJ
1983{
1984 struct oob_data *data;
1985
1986 list_for_each_entry(data, &hdev->remote_oob_data, list)
1987 if (bacmp(bdaddr, &data->bdaddr) == 0)
1988 return data;
1989
1990 return NULL;
1991}
1992
1993int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1994{
1995 struct oob_data *data;
1996
1997 data = hci_find_remote_oob_data(hdev, bdaddr);
1998 if (!data)
1999 return -ENOENT;
2000
6ed93dc6 2001 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
2002
2003 list_del(&data->list);
2004 kfree(data);
2005
2006 return 0;
2007}
2008
2009int hci_remote_oob_data_clear(struct hci_dev *hdev)
2010{
2011 struct oob_data *data, *n;
2012
2013 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2014 list_del(&data->list);
2015 kfree(data);
2016 }
2017
2018 return 0;
2019}
2020
2021int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 2022 u8 *randomizer)
2763eda6
SJ
2023{
2024 struct oob_data *data;
2025
2026 data = hci_find_remote_oob_data(hdev, bdaddr);
2027
2028 if (!data) {
2029 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2030 if (!data)
2031 return -ENOMEM;
2032
2033 bacpy(&data->bdaddr, bdaddr);
2034 list_add(&data->list, &hdev->remote_oob_data);
2035 }
2036
2037 memcpy(data->hash, hash, sizeof(data->hash));
2038 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2039
6ed93dc6 2040 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2041
2042 return 0;
2043}
2044
04124681 2045struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 2046{
8035ded4 2047 struct bdaddr_list *b;
b2a66aad 2048
8035ded4 2049 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
2050 if (bacmp(bdaddr, &b->bdaddr) == 0)
2051 return b;
b2a66aad
AJ
2052
2053 return NULL;
2054}
2055
2056int hci_blacklist_clear(struct hci_dev *hdev)
2057{
2058 struct list_head *p, *n;
2059
2060 list_for_each_safe(p, n, &hdev->blacklist) {
2061 struct bdaddr_list *b;
2062
2063 b = list_entry(p, struct bdaddr_list, list);
2064
2065 list_del(p);
2066 kfree(b);
2067 }
2068
2069 return 0;
2070}
2071
88c1fe4b 2072int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2073{
2074 struct bdaddr_list *entry;
b2a66aad
AJ
2075
2076 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2077 return -EBADF;
2078
5e762444
AJ
2079 if (hci_blacklist_lookup(hdev, bdaddr))
2080 return -EEXIST;
b2a66aad
AJ
2081
2082 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
2083 if (!entry)
2084 return -ENOMEM;
b2a66aad
AJ
2085
2086 bacpy(&entry->bdaddr, bdaddr);
2087
2088 list_add(&entry->list, &hdev->blacklist);
2089
88c1fe4b 2090 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
2091}
2092
88c1fe4b 2093int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2094{
2095 struct bdaddr_list *entry;
b2a66aad 2096
1ec918ce 2097 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 2098 return hci_blacklist_clear(hdev);
b2a66aad
AJ
2099
2100 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 2101 if (!entry)
5e762444 2102 return -ENOENT;
b2a66aad
AJ
2103
2104 list_del(&entry->list);
2105 kfree(entry);
2106
88c1fe4b 2107 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
2108}
2109
4c87eaab 2110static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2111{
4c87eaab
AG
2112 if (status) {
2113 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 2114
4c87eaab
AG
2115 hci_dev_lock(hdev);
2116 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2117 hci_dev_unlock(hdev);
2118 return;
2119 }
7ba8b4be
AG
2120}
2121
/* Request callback run after LE scanning has been disabled. For
 * LE-only discovery this ends the procedure; for interleaved
 * discovery it continues with a BR/EDR inquiry using the general
 * inquiry access code.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery: we're done */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Continue with the BR/EDR inquiry phase */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2164
7ba8b4be
AG
/* Delayed work that turns off an active LE scan by sending
 * LE Set Scan Enable with LE_SCAN_DISABLE. Follow-up handling
 * (stopping discovery or starting inquiry) happens in
 * le_scan_disable_work_complete() when the controller answers.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
2185
9be0dab7
DH
/* Alloc HCI device.
 *
 * Allocates and initializes a struct hci_dev with conservative
 * defaults (packet types, IAC/io-capability, sniff and LE scan
 * parameters), its list heads, work items and queues. The returned
 * device is freed through hci_free_dev() / the device release hook.
 * Returns NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff intervals in baseband slots (0.625 ms units) —
	 * presumably chosen as sane power-save defaults; confirm
	 * against hci_conn sniff mode handling.
	 */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Command timeout watchdog; fires if the controller stalls */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2243
/* Free HCI device.
 *
 * Drops the embedded device reference; the actual kfree happens in
 * the device release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2251
1da177e4
LT
/* Register HCI device.
 *
 * Allocates an index, creates the per-device workqueues, registers
 * sysfs and rfkill, adds the device to the global list and schedules
 * the initial power-on. Returns the assigned index on success or a
 * negative errno; on failure all partially acquired resources are
 * released on the goto error paths.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must provide at least open and close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Single-threaded, high-priority queues: HCI processing must
	 * not be starved and may be needed for memory reclaim (e.g.
	 * Bluetooth-backed network links).
	 */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort: failure leaves
	 * hdev->rfkill NULL and the device still usable.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2344
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes the device from the global
 * list, shuts it down, tears down sysfs/rfkill/workqueues, clears
 * all per-device state lists and finally releases the index and the
 * reference taken at registration time.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Flag first so concurrent paths can bail out early */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Tell mgmt only if the device had finished setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2403
/* Suspend HCI device.
 *
 * Only broadcasts the suspend notification; no state is changed here.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2411
/* Resume HCI device.
 *
 * Only broadcasts the resume notification; no state is changed here.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2419
/* Receive frame from HCI drivers.
 *
 * Tags and timestamps an inbound skb and queues it for deferred
 * processing by hci_rx_work(). Consumes the skb in all cases.
 * Returns -ENXIO and frees the skb if the device is neither up nor
 * initializing.
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
2441
/* Reassemble a (possibly fragmented) HCI packet of the given type.
 *
 * @hdev:  device owning the reassembly slots
 * @type:  HCI packet type (ACL/SCO/event)
 * @data:  next chunk of raw bytes from the driver
 * @count: number of bytes available at @data
 * @index: reassembly slot in hdev->reassembly[] to use
 *
 * Accumulates bytes into a per-slot skb, first the header and then —
 * once the header reveals the payload length — the payload. Complete
 * packets are handed to hci_recv_frame(). Returns the number of
 * unconsumed bytes, or a negative errno (-EILSEQ for a bad type or
 * index, -ENOMEM on allocation/oversize failure).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Fresh packet: size the skb for the worst case of
		 * this packet type and expect the header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload
		 * length from it and sanity-check it against the
		 * remaining tailroom.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2549
ef222013
MH
/* Feed driver-provided fragments of a single packet type into the
 * reassembly machinery. The slot is derived from the type (type - 1),
 * so each packet type reassembles independently. Returns the number
 * of unconsumed bytes from the last chunk, or a negative errno.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
2569
99811510
SS
/* Dedicated reassembly slot for byte-stream transports (e.g. UART),
 * where the packet type indicator is in-band as the first byte.
 */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream into reassembly. When no packet is in
 * progress, the first byte selects the packet type; otherwise the
 * in-progress skb's recorded type is reused. Returns unconsumed
 * byte count or a negative errno from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2604
1da177e4
LT
2605/* ---- Interface to upper protocols ---- */
2606
1da177e4
LT
/* Register an upper-protocol callback set. Always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2618
/* Unregister an upper-protocol callback set. Always succeeds. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2630
/* Hand one outbound skb to the driver.
 *
 * Timestamps the skb, mirrors it to the monitor socket (and to raw
 * sockets when in promiscuous mode) and then calls the driver's
 * send hook, which takes ownership of the skb.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
2652
3119ae95
JH
2653void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2654{
2655 skb_queue_head_init(&req->cmd_q);
2656 req->hdev = hdev;
5d73e034 2657 req->err = 0;
3119ae95
JH
2658}
2659
/* Submit a built request to the command queue.
 *
 * The completion callback is attached to the last queued command so
 * it fires once the whole request has been processed. Returns the
 * deferred build error (purging the queue) if one occurred, -ENODATA
 * for an empty request, 0 on successful submission.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occured during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice atomically so the request's commands stay contiguous
	 * in the device command queue.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2691
/* Build an skb carrying one HCI command (header + optional params).
 * Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);	/* wire format is little endian */
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}
2716
/* Send HCI command.
 *
 * Builds a stand-alone command and queues it for the command worker.
 * Returns 0 on queueing success, -ENOMEM if the skb could not be
 * allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flaged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 2741
/* Queue a command to an asynchronous HCI request.
 *
 * @event: specific event opcode to wait for instead of the default
 *         Command Status/Complete (0 means default handling).
 *
 * On allocation failure the error is recorded in req->err; the
 * failure surfaces later from hci_req_run(), so callers can chain
 * hci_req_add() calls without checking each one.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occured during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* First command of a request marks the request boundary */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
2772
07dc93dd
JH
/* Queue a command that completes with the default Command
 * Status/Complete event (event == 0).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
2778
/* Get data from the previously sent command.
 *
 * Returns a pointer to the parameter bytes of the last sent command
 * if its opcode matches, otherwise NULL. The pointer aliases
 * hdev->sent_cmd and is only valid until the next command is sent.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2796
/* Send ACL data */

/* Prepend an ACL header (handle+flags, little-endian length) to the
 * skb. The skb must have HCI_ACL_HDR_SIZE bytes of headroom.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2809
/* Queue an ACL skb (and any fragments in its frag_list) on the given
 * channel queue, adding an ACL header to each fragment. The first
 * fragment keeps the caller's flags; continuation fragments are
 * re-flagged ACL_CONT. For AMP controllers the channel handle is
 * used instead of the connection handle.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2867
/* Queue ACL data on a channel and kick the TX worker. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2878
/* Send SCO data.
 *
 * Prepends a SCO header (handle + one-byte payload length) and queues
 * the skb on the connection for the TX worker.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2899
2900/* ---- HCI TX task (outgoing data) ---- */
2901
/* HCI Connection scheduler.
 *
 * Pick the connection of the given link type with pending data and
 * the fewest in-flight packets (fair scheduling), and compute its
 * send quota as available-credits / number-of-candidates (minimum 1).
 * Returns NULL and *quote = 0 when nothing is ready to send.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen — stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE shares ACL credits when it has no own buffers */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2962
/* TX watchdog: forcibly disconnect every connection of the given type
 * that still has unacknowledged packets, assuming the controller or
 * the link has stalled.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2983
6039aa73
GP
/* Channel-level scheduler: among all channels of connections of the
 * given link type, pick one whose head skb has the highest priority,
 * breaking ties by the fewest packets in flight on the owning
 * connection. *quote receives the per-round send budget. Returns
 * NULL when no channel has queued data.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart fairness
			 * accounting at this priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the credit pool that matches the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3065
02b20f0b
LAD
/* Anti-starvation pass after a TX round: for every channel of the
 * given link type that sent nothing this round, promote its head skb
 * to just below the maximum priority so it wins a future round.
 * Channels that did send have their per-round counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
3115
b71d385a
AE
3116static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3117{
3118 /* Calculate count of blocks used by this packet */
3119 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3120}
3121
/* If all TX credits are exhausted and nothing has been sent for
 * longer than the ACL TX timeout, assume stalled links and kill them.
 * Skipped entirely in raw mode (user space drives the device).
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 3132
/* Packet-based ACL scheduler: drain channel queues while ACL credits
 * remain, honoring per-channel quotas and skb priorities, then run
 * the priority recalculation if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3170
/* Block-based ACL scheduler (block flow control, e.g. AMP): like
 * hci_sched_acl_pkt() but credits are counted in data blocks, so a
 * packet needing more blocks than remain stops the round.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP links with the block pool */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3224
/* Dispatch ACL scheduling to the packet- or block-based variant
 * depending on the controller's flow control mode.
 */
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
3247
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* sent wraps to 0 at ~0; SCO has no per-packet
			 * flow control acknowledgements here.
			 */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3271
/* Schedule eSCO links — same strategy as hci_sched_sco(), sharing
 * the SCO credit counter.
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3295
6039aa73 3296static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3297{
73d80deb 3298 struct hci_chan *chan;
6ed58ec5 3299 struct sk_buff *skb;
02b20f0b 3300 int quote, cnt, tmp;
6ed58ec5
VT
3301
3302 BT_DBG("%s", hdev->name);
3303
52087a79
LAD
3304 if (!hci_conn_num(hdev, LE_LINK))
3305 return;
3306
6ed58ec5
VT
3307 if (!test_bit(HCI_RAW, &hdev->flags)) {
3308 /* LE tx timeout must be longer than maximum
3309 * link supervision timeout (40.9 seconds) */
bae1f5d9 3310 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 3311 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 3312 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
3313 }
3314
3315 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 3316 tmp = cnt;
73d80deb 3317 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
3318 u32 priority = (skb_peek(&chan->data_q))->priority;
3319 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3320 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3321 skb->len, skb->priority);
6ed58ec5 3322
ec1cce24
LAD
3323 /* Stop if priority has changed */
3324 if (skb->priority < priority)
3325 break;
3326
3327 skb = skb_dequeue(&chan->data_q);
3328
57d17d70 3329 hci_send_frame(hdev, skb);
6ed58ec5
VT
3330 hdev->le_last_tx = jiffies;
3331
3332 cnt--;
73d80deb
LAD
3333 chan->sent++;
3334 chan->conn->sent++;
6ed58ec5
VT
3335 }
3336 }
73d80deb 3337
6ed58ec5
VT
3338 if (hdev->le_pkts)
3339 hdev->le_cnt = cnt;
3340 else
3341 hdev->acl_cnt = cnt;
02b20f0b
LAD
3342
3343 if (cnt != tmp)
3344 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
3345}
3346
3eff45ea 3347static void hci_tx_work(struct work_struct *work)
1da177e4 3348{
3eff45ea 3349 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3350 struct sk_buff *skb;
3351
6ed58ec5 3352 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3353 hdev->sco_cnt, hdev->le_cnt);
1da177e4 3354
52de599e
MH
3355 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3356 /* Schedule queues and send stuff to HCI driver */
3357 hci_sched_acl(hdev);
3358 hci_sched_sco(hdev);
3359 hci_sched_esco(hdev);
3360 hci_sched_le(hdev);
3361 }
6ed58ec5 3362
1da177e4
LT
3363 /* Send next queued raw (unknown type) packet */
3364 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 3365 hci_send_frame(hdev, skb);
1da177e4
LT
3366}
3367
25985edc 3368/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3369
3370/* ACL data packet */
6039aa73 3371static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3372{
3373 struct hci_acl_hdr *hdr = (void *) skb->data;
3374 struct hci_conn *conn;
3375 __u16 handle, flags;
3376
3377 skb_pull(skb, HCI_ACL_HDR_SIZE);
3378
3379 handle = __le16_to_cpu(hdr->handle);
3380 flags = hci_flags(handle);
3381 handle = hci_handle(handle);
3382
f0e09510 3383 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 3384 handle, flags);
1da177e4
LT
3385
3386 hdev->stat.acl_rx++;
3387
3388 hci_dev_lock(hdev);
3389 conn = hci_conn_hash_lookup_handle(hdev, handle);
3390 hci_dev_unlock(hdev);
8e87d142 3391
1da177e4 3392 if (conn) {
65983fc7 3393 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 3394
1da177e4 3395 /* Send to upper protocol */
686ebf28
UF
3396 l2cap_recv_acldata(conn, skb, flags);
3397 return;
1da177e4 3398 } else {
8e87d142 3399 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 3400 hdev->name, handle);
1da177e4
LT
3401 }
3402
3403 kfree_skb(skb);
3404}
3405
3406/* SCO data packet */
6039aa73 3407static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3408{
3409 struct hci_sco_hdr *hdr = (void *) skb->data;
3410 struct hci_conn *conn;
3411 __u16 handle;
3412
3413 skb_pull(skb, HCI_SCO_HDR_SIZE);
3414
3415 handle = __le16_to_cpu(hdr->handle);
3416
f0e09510 3417 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
3418
3419 hdev->stat.sco_rx++;
3420
3421 hci_dev_lock(hdev);
3422 conn = hci_conn_hash_lookup_handle(hdev, handle);
3423 hci_dev_unlock(hdev);
3424
3425 if (conn) {
1da177e4 3426 /* Send to upper protocol */
686ebf28
UF
3427 sco_recv_scodata(conn, skb);
3428 return;
1da177e4 3429 } else {
8e87d142 3430 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 3431 hdev->name, handle);
1da177e4
LT
3432 }
3433
3434 kfree_skb(skb);
3435}
3436
9238f36a
JH
3437static bool hci_req_is_complete(struct hci_dev *hdev)
3438{
3439 struct sk_buff *skb;
3440
3441 skb = skb_peek(&hdev->cmd_q);
3442 if (!skb)
3443 return true;
3444
3445 return bt_cb(skb)->req.start;
3446}
3447
42c6b129
JH
3448static void hci_resend_last(struct hci_dev *hdev)
3449{
3450 struct hci_command_hdr *sent;
3451 struct sk_buff *skb;
3452 u16 opcode;
3453
3454 if (!hdev->sent_cmd)
3455 return;
3456
3457 sent = (void *) hdev->sent_cmd->data;
3458 opcode = __le16_to_cpu(sent->opcode);
3459 if (opcode == HCI_OP_RESET)
3460 return;
3461
3462 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3463 if (!skb)
3464 return;
3465
3466 skb_queue_head(&hdev->cmd_q, skb);
3467 queue_work(hdev->workqueue, &hdev->cmd_work);
3468}
3469
/* Called when a command completes (status/complete event) to decide
 * whether the HCI request it belonged to is finished, and if so run
 * the request's completion callback exactly once.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			/* Head of the next request - put it back and stop */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3535
b78752cc 3536static void hci_rx_work(struct work_struct *work)
1da177e4 3537{
b78752cc 3538 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
3539 struct sk_buff *skb;
3540
3541 BT_DBG("%s", hdev->name);
3542
1da177e4 3543 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
3544 /* Send copy to monitor */
3545 hci_send_to_monitor(hdev, skb);
3546
1da177e4
LT
3547 if (atomic_read(&hdev->promisc)) {
3548 /* Send copy to the sockets */
470fe1b5 3549 hci_send_to_sock(hdev, skb);
1da177e4
LT
3550 }
3551
0736cfa8
MH
3552 if (test_bit(HCI_RAW, &hdev->flags) ||
3553 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
3554 kfree_skb(skb);
3555 continue;
3556 }
3557
3558 if (test_bit(HCI_INIT, &hdev->flags)) {
3559 /* Don't process data packets in this states. */
0d48d939 3560 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
3561 case HCI_ACLDATA_PKT:
3562 case HCI_SCODATA_PKT:
3563 kfree_skb(skb);
3564 continue;
3ff50b79 3565 }
1da177e4
LT
3566 }
3567
3568 /* Process frame */
0d48d939 3569 switch (bt_cb(skb)->pkt_type) {
1da177e4 3570 case HCI_EVENT_PKT:
b78752cc 3571 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
3572 hci_event_packet(hdev, skb);
3573 break;
3574
3575 case HCI_ACLDATA_PKT:
3576 BT_DBG("%s ACL data packet", hdev->name);
3577 hci_acldata_packet(hdev, skb);
3578 break;
3579
3580 case HCI_SCODATA_PKT:
3581 BT_DBG("%s SCO data packet", hdev->name);
3582 hci_scodata_packet(hdev, skb);
3583 break;
3584
3585 default:
3586 kfree_skb(skb);
3587 break;
3588 }
3589 }
1da177e4
LT
3590}
3591
c347b765 3592static void hci_cmd_work(struct work_struct *work)
1da177e4 3593{
c347b765 3594 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
3595 struct sk_buff *skb;
3596
2104786b
AE
3597 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3598 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 3599
1da177e4 3600 /* Send queued commands */
5a08ecce
AE
3601 if (atomic_read(&hdev->cmd_cnt)) {
3602 skb = skb_dequeue(&hdev->cmd_q);
3603 if (!skb)
3604 return;
3605
7585b97a 3606 kfree_skb(hdev->sent_cmd);
1da177e4 3607
a675d7f1 3608 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 3609 if (hdev->sent_cmd) {
1da177e4 3610 atomic_dec(&hdev->cmd_cnt);
57d17d70 3611 hci_send_frame(hdev, skb);
7bdb8a5c
SJ
3612 if (test_bit(HCI_RESET, &hdev->flags))
3613 del_timer(&hdev->cmd_timer);
3614 else
3615 mod_timer(&hdev->cmd_timer,
5f246e89 3616 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
3617 } else {
3618 skb_queue_head(&hdev->cmd_q, skb);
c347b765 3619 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3620 }
3621 }
3622}