Bluetooth: Read number of supported IAC on controller setup
[linux-block.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Forward a device event (register/unregister, up/down) to the HCI
 * socket layer so monitoring sockets are informed of state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
/* Completion callback for synchronous HCI requests: store the result
 * and wake the thread sleeping in __hci_req_sync()/__hci_cmd_sync_ev().
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	/* Only complete a request that is actually pending */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
70
/* Abort a pending synchronous request with @err and wake the waiter.
 * The waiter sees HCI_REQ_CANCELED and returns -hdev->req_result.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
81
77a63e0a
FW
/* Take the last received event skb (hdev->recv_evt) and validate it for
 * a synchronous command sender.
 *
 * If @event is non-zero the caller waits for that specific event code;
 * otherwise a Command Complete event whose opcode matches @opcode is
 * expected.  On success the skb (headers already pulled) is returned and
 * ownership transfers to the caller; on any mismatch or malformed event
 * the skb is freed and ERR_PTR(-ENODATA) is returned.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	/* Detach the stored event under the lock so no one else frees it */
	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event rather than Command Complete */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
136
/* Send a single HCI command and sleep (up to @timeout jiffies) until the
 * request completes.  @event == 0 means wait for the Command Complete
 * event of @opcode; a non-zero @event waits for that event instead.
 *
 * Returns the matching event skb (ownership passes to the caller) or an
 * ERR_PTR on failure, signal (-EINTR) or timeout (-ETIMEDOUT).
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	/* Enqueue ourselves before sleeping so a fast completion cannot
	 * be missed between hci_req_run() and schedule_timeout().
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: nothing woke us before the timeout */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
/* Convenience wrapper around __hci_cmd_sync_ev() that waits for the
 * Command Complete event of @opcode (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
197
/* Execute request and wait for completion.
 *
 * @func builds the request (queues zero or more commands); this helper
 * runs it and sleeps up to @timeout jiffies for hci_req_sync_complete()
 * to fire.  Must be called with the request lock held (see
 * hci_req_sync()).  Returns 0 on success or a negative errno.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller queue its commands into the request */
	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* No completion before the timeout expired */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
261
/* Public entry for synchronous requests: verifies the device is up and
 * serializes all requests via the per-device request lock before
 * delegating to __hci_req_sync().
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
279
42c6b129 280static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 281{
42c6b129 282 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
283
284 /* Reset device */
42c6b129
JH
285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
287}
288
42c6b129 289static void bredr_init(struct hci_request *req)
1da177e4 290{
42c6b129 291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 292
1da177e4 293 /* Read Local Supported Features */
42c6b129 294 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 295
1143e5a6 296 /* Read Local Version */
42c6b129 297 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
298
299 /* Read BD Address */
42c6b129 300 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
301}
302
/* Stage-1 init for AMP controllers: block-based flow control plus the
 * AMP-specific capability reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
328
/* First stage of controller initialization: optional reset followed by
 * the transport-specific basic setup (BR/EDR or AMP).
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset, unless the driver asked to skip it (reset done on close) */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
353
/* Stage-2 BR/EDR setup: query controller parameters and apply the
 * default filter/timeout configuration.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
392
/* Stage-2 LE setup: read LE controller capabilities and implicitly
 * enable LE on single-mode (LE-only) controllers.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
416
417static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
418{
419 if (lmp_ext_inq_capable(hdev))
420 return 0x02;
421
422 if (lmp_inq_rssi_capable(hdev))
423 return 0x01;
424
425 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
426 hdev->lmp_subver == 0x0757)
427 return 0x01;
428
429 if (hdev->manufacturer == 15) {
430 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
431 return 0x01;
432 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
433 return 0x01;
434 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
435 return 0x01;
436 }
437
438 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
439 hdev->lmp_subver == 0x1805)
440 return 0x01;
441
442 return 0x00;
443}
444
42c6b129 445static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
446{
447 u8 mode;
448
42c6b129 449 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 450
42c6b129 451 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
452}
453
/* Build and queue the Set Event Mask (and, for LE, LE Set Event Mask)
 * commands, enabling only the events the controller's feature set can
 * actually generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* Reuse the buffer for the LE event mask */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
534
/* Second stage of controller initialization: transport setup, event
 * masks, and feature-conditional configuration (SSP, EIR, inquiry mode,
 * extended features, link-level security).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP disabled: clear any stale EIR data */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
590
42c6b129 591static void hci_setup_link_policy(struct hci_request *req)
2177bab5 592{
42c6b129 593 struct hci_dev *hdev = req->hdev;
2177bab5
JH
594 struct hci_cp_write_def_link_policy cp;
595 u16 link_policy = 0;
596
597 if (lmp_rswitch_capable(hdev))
598 link_policy |= HCI_LP_RSWITCH;
599 if (lmp_hold_capable(hdev))
600 link_policy |= HCI_LP_HOLD;
601 if (lmp_sniff_capable(hdev))
602 link_policy |= HCI_LP_SNIFF;
603 if (lmp_park_capable(hdev))
604 link_policy |= HCI_LP_PARK;
605
606 cp.policy = cpu_to_le16(link_policy);
42c6b129 607 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
608}
609
/* Sync the controller's LE host-support setting with the HCI_LE_ENABLED
 * flag, sending Write LE Host Supported only when the value would change.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only write if the host LE setting actually changes */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
630
/* Build and queue the Set Event Mask Page 2 command, enabling the
 * Connectionless Slave Broadcast events matching the controller's
 * master/slave CSB feature bits (features page 2, byte 0).
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
658
/* Third stage of controller initialization: stored-link-key cleanup,
 * default link policy, LE support, and extended feature pages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Only configure the default link policy if the controller
	 * advertises the Write Default Link Policy command.
	 */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
699
/* Fourth stage of controller initialization: optional features that
 * depend on results gathered in the earlier stages.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}
712
2177bab5
JH
/* Run the staged controller initialization.  AMP controllers only need
 * stage 1; BR/EDR/LE controllers run all four stages.  Returns 0 on
 * success or the first stage's negative error.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}
738
42c6b129 739static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
740{
741 __u8 scan = opt;
742
42c6b129 743 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
744
745 /* Inquiry and Page scans */
42c6b129 746 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
747}
748
42c6b129 749static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
750{
751 __u8 auth = opt;
752
42c6b129 753 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
754
755 /* Authentication */
42c6b129 756 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
757}
758
42c6b129 759static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
760{
761 __u8 encrypt = opt;
762
42c6b129 763 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 764
e4e8e37c 765 /* Encryption */
42c6b129 766 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
767}
768
42c6b129 769static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
770{
771 __le16 policy = cpu_to_le16(opt);
772
42c6b129 773 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
774
775 /* Default link policy */
42c6b129 776 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
777}
778
/* Get HCI device by index.
 * Device is held on return; the caller must release it with
 * hci_dev_put().  Returns NULL if no device with @index exists.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			/* Take a reference while still under the lock */
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
1da177e4
LT
800
801/* ---- Inquiry support ---- */
ff9ef578 802
30dc78e1
JH
803bool hci_discovery_active(struct hci_dev *hdev)
804{
805 struct discovery_state *discov = &hdev->discovery;
806
6fbe195d 807 switch (discov->state) {
343f935b 808 case DISCOVERY_FINDING:
6fbe195d 809 case DISCOVERY_RESOLVING:
30dc78e1
JH
810 return true;
811
6fbe195d
AG
812 default:
813 return false;
814 }
30dc78e1
JH
815}
816
ff9ef578
JH
/* Transition the discovery state machine and emit mgmt "discovering"
 * events on the edges that represent a real start/stop of discovery.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never really began,
		 * so no "discovering stopped" event is sent in that case.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
842
/* Free every entry in the inquiry cache and reset the unknown/resolve
 * lists.  Entries live on the "all" list, so deleting there drops them
 * from the cache entirely; the secondary lists are simply reinitialized.
 */
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
856
a8c5fb1a
GP
/* Find the cache entry for @bdaddr in the full inquiry cache, or NULL
 * if the address has not been seen.
 */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
872
/* Find @bdaddr among the entries whose remote name is still unknown,
 * or NULL if it is not on the unknown-name list.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
888
/* Find an entry on the name-resolve list.  Passing BDADDR_ANY matches
 * the first entry in name state @state; otherwise the entry matching
 * @bdaddr is returned.  NULL if nothing matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
907
/* Re-insert @ie into the resolve list at its correct position.  The
 * list is kept ordered by signal strength (smaller |RSSI| first), so
 * stronger devices get their names resolved earlier; entries whose name
 * resolution is already pending are not displaced.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until the first non-pending entry with weaker-or-equal
	 * signal; @ie is inserted just before it.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
926
/* Insert or refresh an inquiry-cache entry from inquiry result @data.
 *
 * @name_known: whether the remote name came with this result.
 * @ssp: out-parameter (may be NULL) set when the device reports SSP mode.
 *
 * Returns true if the entry's name is known (i.e. no name resolution is
 * needed), false if the name is still unknown or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Fresh inquiry data invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* An RSSI change re-sorts the entry in the resolve list */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from the unknown/resolve
	 * list it was queued on (unless resolution is already pending).
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
984
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info.  Returns the number of entries copied.  The
 * caller must hold the device lock and size @buf accordingly.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
1012
/* Request builder: start an inquiry with the LAP/length/num_rsp taken
 * from the struct hci_inquiry_req passed via @opt.  Does nothing if an
 * inquiry is already in progress.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
1030
/* Bit-wait action for wait_on_bit(): yield the CPU and report whether a
 * signal interrupted the wait (non-zero aborts the wait with -EINTR).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1036
1da177e4
LT
1037int hci_inquiry(void __user *arg)
1038{
1039 __u8 __user *ptr = arg;
1040 struct hci_inquiry_req ir;
1041 struct hci_dev *hdev;
1042 int err = 0, do_inquiry = 0, max_rsp;
1043 long timeo;
1044 __u8 *buf;
1045
1046 if (copy_from_user(&ir, ptr, sizeof(ir)))
1047 return -EFAULT;
1048
5a08ecce
AE
1049 hdev = hci_dev_get(ir.dev_id);
1050 if (!hdev)
1da177e4
LT
1051 return -ENODEV;
1052
0736cfa8
MH
1053 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1054 err = -EBUSY;
1055 goto done;
1056 }
1057
5b69bef5
MH
1058 if (hdev->dev_type != HCI_BREDR) {
1059 err = -EOPNOTSUPP;
1060 goto done;
1061 }
1062
56f87901
JH
1063 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1064 err = -EOPNOTSUPP;
1065 goto done;
1066 }
1067
09fd0de5 1068 hci_dev_lock(hdev);
8e87d142 1069 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1070 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1071 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1072 do_inquiry = 1;
1073 }
09fd0de5 1074 hci_dev_unlock(hdev);
1da177e4 1075
04837f64 1076 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1077
1078 if (do_inquiry) {
01178cd4
JH
1079 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1080 timeo);
70f23020
AE
1081 if (err < 0)
1082 goto done;
3e13fa1e
AG
1083
1084 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1085 * cleared). If it is interrupted by a signal, return -EINTR.
1086 */
1087 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1088 TASK_INTERRUPTIBLE))
1089 return -EINTR;
70f23020 1090 }
1da177e4 1091
8fc9ced3
GP
1092 /* for unlimited number of responses we will use buffer with
1093 * 255 entries
1094 */
1da177e4
LT
1095 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1096
1097 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1098 * copy it to the user space.
1099 */
01df8c31 1100 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1101 if (!buf) {
1da177e4
LT
1102 err = -ENOMEM;
1103 goto done;
1104 }
1105
09fd0de5 1106 hci_dev_lock(hdev);
1da177e4 1107 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1108 hci_dev_unlock(hdev);
1da177e4
LT
1109
1110 BT_DBG("num_rsp %d", ir.num_rsp);
1111
1112 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1113 ptr += sizeof(ir);
1114 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1115 ir.num_rsp))
1da177e4 1116 err = -EFAULT;
8e87d142 1117 } else
1da177e4
LT
1118 err = -EFAULT;
1119
1120 kfree(buf);
1121
1122done:
1123 hci_dev_put(hdev);
1124 return err;
1125}
1126
3f0f524b
JH
/* Build LE advertising data into @ptr: an optional Flags field, an optional
 * TX-power field, and the device name (shortened if it does not fit in the
 * remaining HCI_MAX_AD_LENGTH bytes). Returns the total AD length written.
 */
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        /* Advertise simultaneous LE + BR/EDR support only when BR/EDR is
         * enabled and the controller/host report the capability.
         */
        if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                if (lmp_le_br_capable(hdev))
                        flags |= LE_AD_SIM_LE_BREDR_CTRL;
                if (lmp_host_le_br_capable(hdev))
                        flags |= LE_AD_SIM_LE_BREDR_HOST;
        } else {
                flags |= LE_AD_NO_BREDR;
        }

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                /* Each AD element: [length][type][payload...] */
                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                /* Room left for the name payload (2 bytes of header) */
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* Length byte covers the type byte plus the name bytes */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}
1184
04b4edcb 1185void hci_update_ad(struct hci_request *req)
3f0f524b 1186{
04b4edcb 1187 struct hci_dev *hdev = req->hdev;
3f0f524b
JH
1188 struct hci_cp_le_set_adv_data cp;
1189 u8 len;
3f0f524b 1190
04b4edcb
JH
1191 if (!lmp_le_capable(hdev))
1192 return;
3f0f524b
JH
1193
1194 memset(&cp, 0, sizeof(cp));
1195
1196 len = create_ad(hdev, cp.data);
1197
1198 if (hdev->adv_data_len == len &&
04b4edcb
JH
1199 memcmp(cp.data, hdev->adv_data, len) == 0)
1200 return;
3f0f524b
JH
1201
1202 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1203 hdev->adv_data_len = len;
1204
1205 cp.length = len;
3f0f524b 1206
04b4edcb 1207 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
3f0f524b
JH
1208}
1209
/* Bring the controller up: driver open, optional vendor setup (first time
 * only), HCI initialization, and power notification. On any init failure
 * everything done so far is unwound. Runs under the request lock.
 *
 * Returns 0 on success or a negative errno (-ENODEV, -ERFKILL,
 * -EADDRNOTAVAIL, -EALREADY, -EIO, or an init error).
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random adddress, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        /* Driver-provided transport open callback */
        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        /* Vendor-specific setup runs only once, during initial setup */
        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                /* Skip HCI init for raw devices and user-channel access */
                if (!test_bit(HCI_RAW, &hdev->flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                /* Tell mgmt the device is powered, except during setup,
                 * user-channel access, or for non-BR/EDR controllers.
                 */
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        return ret;
}
1311
cbed0ca1
JH
1312/* ---- HCI ioctl helpers ---- */
1313
1314int hci_dev_open(__u16 dev)
1315{
1316 struct hci_dev *hdev;
1317 int err;
1318
1319 hdev = hci_dev_get(dev);
1320 if (!hdev)
1321 return -ENODEV;
1322
e1d08f40
JH
1323 /* We need to ensure that no other power on/off work is pending
1324 * before proceeding to call hci_dev_do_open. This is
1325 * particularly important if the setup procedure has not yet
1326 * completed.
1327 */
1328 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1329 cancel_delayed_work(&hdev->power_off);
1330
a5c8f270
MH
1331 /* After this call it is guaranteed that the setup procedure
1332 * has finished. This means that error conditions like RFKILL
1333 * or no valid public or static random address apply.
1334 */
e1d08f40
JH
1335 flush_workqueue(hdev->req_workqueue);
1336
cbed0ca1
JH
1337 err = hci_dev_do_open(hdev);
1338
1339 hci_dev_put(hdev);
1340
1341 return err;
1342}
1343
1da177e4
LT
/* Take the controller down: cancel pending work, flush queues and the
 * connection/inquiry state, optionally issue an HCI Reset, close the driver
 * transport, and clear the runtime flags. Always returns 0. The ordering of
 * the teardown steps is significant.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        /* Already down: just stop the command timer and bail out */
        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        /* Issue an HCI Reset unless the device is raw, auto-off is in
         * progress, or the quirk does not request a reset on close.
         */
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd  work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                if (hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 0);
                        hci_dev_unlock(hdev);
                }
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = AMP_STATUS_POWERED_DOWN;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        /* Drops the reference taken in hci_dev_do_open() */
        hci_dev_put(hdev);
        return 0;
}
1440
1441int hci_dev_close(__u16 dev)
1442{
1443 struct hci_dev *hdev;
1444 int err;
1445
70f23020
AE
1446 hdev = hci_dev_get(dev);
1447 if (!hdev)
1da177e4 1448 return -ENODEV;
8ee56540 1449
0736cfa8
MH
1450 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1451 err = -EBUSY;
1452 goto done;
1453 }
1454
8ee56540
MH
1455 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1456 cancel_delayed_work(&hdev->power_off);
1457
1da177e4 1458 err = hci_dev_do_close(hdev);
8ee56540 1459
0736cfa8 1460done:
1da177e4
LT
1461 hci_dev_put(hdev);
1462 return err;
1463}
1464
/* HCIDEVRESET ioctl helper: drop all queued traffic and cached state, flush
 * the driver, reset the flow-control counters, and issue an HCI Reset
 * (unless the device is raw). The device must already be up.
 *
 * Returns 0 on success or -ENODEV, -ENETDOWN, -EBUSY, or a request error.
 */
int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags)) {
                ret = -ENETDOWN;
                goto done;
        }

        /* Devices claimed for user-channel access are off-limits */
        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                ret = -EBUSY;
                goto done;
        }

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset command and packet flow-control counters */
        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
1509
1510int hci_dev_reset_stat(__u16 dev)
1511{
1512 struct hci_dev *hdev;
1513 int ret = 0;
1514
70f23020
AE
1515 hdev = hci_dev_get(dev);
1516 if (!hdev)
1da177e4
LT
1517 return -ENODEV;
1518
0736cfa8
MH
1519 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1520 ret = -EBUSY;
1521 goto done;
1522 }
1523
1da177e4
LT
1524 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1525
0736cfa8 1526done:
1da177e4 1527 hci_dev_put(hdev);
1da177e4
LT
1528 return ret;
1529}
1530
/* Dispatcher for the HCISET* ioctls: runs the matching synchronous HCI
 * request or updates the corresponding hdev field directly. Only valid for
 * enabled BR/EDR controllers not claimed for user-channel access.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                  (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                /* dev_opt packs two __u16 values: [pkts][mtu] */
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                /* dev_opt packs two __u16 values: [pkts][mtu] */
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

done:
        hci_dev_put(hdev);
        return err;
}
1621
/* HCIGETDEVLIST ioctl helper: copy up to the user-requested number of
 * registered device ids and flags to user space.
 *
 * Returns 0 on success or -EFAULT, -EINVAL, -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        /* Bound the allocation to two pages worth of entries */
        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                /* Legacy ioctl access cancels any pending auto power-off */
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                /* Without mgmt control, devices default to pairable */
                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        /* Shrink the copy size to the number of devices actually found */
        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}
1668
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info snapshot for the
 * requested device and copy it to user space. For LE-only controllers the
 * ACL fields carry the LE buffer values and the SCO fields are zeroed.
 *
 * Returns 0 on success or -EFAULT, -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        /* Legacy ioctl access cancels any pending auto power-off */
        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        /* Without mgmt control, devices default to pairable */
        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr = hdev->bdaddr;
        /* Pack bus type in the low nibble, device type in the next one */
        di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
        di.flags = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}
1717
1718/* ---- Interface to HCI drivers ---- */
1719
611b30f7
MH
1720static int hci_rfkill_set_block(void *data, bool blocked)
1721{
1722 struct hci_dev *hdev = data;
1723
1724 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1725
0736cfa8
MH
1726 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1727 return -EBUSY;
1728
5e130367
JH
1729 if (blocked) {
1730 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
1731 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1732 hci_dev_do_close(hdev);
5e130367
JH
1733 } else {
1734 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 1735 }
611b30f7
MH
1736
1737 return 0;
1738}
1739
/* rfkill integration: only the block/unblock callback is implemented */
static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};
1743
ab81cbf9
JH
/* Workqueue handler that powers the controller on. After a successful open
 * it re-validates the conditions ignored during setup (rfkill, missing
 * address) and powers back off if they still hold; otherwise it arms the
 * auto power-off timer when HCI_AUTO_OFF is set.
 */
static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        err = hci_dev_do_open(hdev);
        if (err < 0) {
                mgmt_set_powered_failed(hdev, err);
                return;
        }

        /* During the HCI setup phase, a few error conditions are
         * ignored and they need to be checked now. If they are still
         * valid, it is important to turn the device back off.
         */
        if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
            (hdev->dev_type == HCI_BREDR &&
             !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
             !bacmp(&hdev->static_addr, BDADDR_ANY))) {
                clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
                hci_dev_do_close(hdev);
        } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);
        }

        /* First successful power-on completes setup and announces the
         * controller index to mgmt.
         */
        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}
1775
1776static void hci_power_off(struct work_struct *work)
1777{
3243553f 1778 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 1779 power_off.work);
ab81cbf9
JH
1780
1781 BT_DBG("%s", hdev->name);
1782
8ee56540 1783 hci_dev_do_close(hdev);
ab81cbf9
JH
1784}
1785
16ab91ab
JH
1786static void hci_discov_off(struct work_struct *work)
1787{
1788 struct hci_dev *hdev;
1789 u8 scan = SCAN_PAGE;
1790
1791 hdev = container_of(work, struct hci_dev, discov_off.work);
1792
1793 BT_DBG("%s", hdev->name);
1794
09fd0de5 1795 hci_dev_lock(hdev);
16ab91ab
JH
1796
1797 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1798
1799 hdev->discov_timeout = 0;
1800
09fd0de5 1801 hci_dev_unlock(hdev);
16ab91ab
JH
1802}
1803
2aeb9a1a
JH
1804int hci_uuids_clear(struct hci_dev *hdev)
1805{
4821002c 1806 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1807
4821002c
JH
1808 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1809 list_del(&uuid->list);
2aeb9a1a
JH
1810 kfree(uuid);
1811 }
1812
1813 return 0;
1814}
1815
55ed8ca1
JH
1816int hci_link_keys_clear(struct hci_dev *hdev)
1817{
1818 struct list_head *p, *n;
1819
1820 list_for_each_safe(p, n, &hdev->link_keys) {
1821 struct link_key *key;
1822
1823 key = list_entry(p, struct link_key, list);
1824
1825 list_del(p);
1826 kfree(key);
1827 }
1828
1829 return 0;
1830}
1831
b899efaf
VCG
1832int hci_smp_ltks_clear(struct hci_dev *hdev)
1833{
1834 struct smp_ltk *k, *tmp;
1835
1836 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1837 list_del(&k->list);
1838 kfree(k);
1839 }
1840
1841 return 0;
1842}
1843
55ed8ca1
JH
1844struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1845{
8035ded4 1846 struct link_key *k;
55ed8ca1 1847
8035ded4 1848 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1849 if (bacmp(bdaddr, &k->bdaddr) == 0)
1850 return k;
55ed8ca1
JH
1851
1852 return NULL;
1853}
1854
/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and the bonding requirements of both sides.
 * Returns true to keep the key across power cycles, false to discard it.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}
1890
c9839a11 1891struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1892{
c9839a11 1893 struct smp_ltk *k;
75d262c2 1894
c9839a11
VCG
1895 list_for_each_entry(k, &hdev->long_term_keys, list) {
1896 if (k->ediv != ediv ||
a8c5fb1a 1897 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1898 continue;
1899
c9839a11 1900 return k;
75d262c2
VCG
1901 }
1902
1903 return NULL;
1904}
75d262c2 1905
c9839a11 1906struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1907 u8 addr_type)
75d262c2 1908{
c9839a11 1909 struct smp_ltk *k;
75d262c2 1910
c9839a11
VCG
1911 list_for_each_entry(k, &hdev->long_term_keys, list)
1912 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1913 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1914 return k;
1915
1916 return NULL;
1917}
75d262c2 1918
/* Store (or update) a BR/EDR link key for @bdaddr. When @new_key is set,
 * notify mgmt with the persistence decision from hci_persistent_key() and
 * mark the connection's key for flushing if non-persistent.
 *
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;
        bool persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                /* 0xff marks "no previous key type known" */
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        /* A changed combination key keeps the previous key's type */
        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (conn)
                conn->flush_key = !persistent;

        return 0;
}
1971
/* Store (or update) an SMP long term key for @bdaddr/@addr_type. Only STK
 * and LTK types are accepted; when @new_key is set and the key is an LTK,
 * mgmt is notified.
 *
 * Returns 0 on success (also for ignored key types) or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
                int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
                ediv, u8 rand[8])
{
        struct smp_ltk *key, *old_key;

        /* Silently ignore anything that is neither an STK nor an LTK */
        if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
                return 0;

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->enc_size = enc_size;
        key->type = type;
        memcpy(key->rand, rand, sizeof(key->rand));

        if (!new_key)
                return 0;

        /* Only full LTKs (not short term keys) are reported to mgmt */
        if (type & HCI_SMP_LTK)
                mgmt_new_ltk(hdev, key, 1);

        return 0;
}
2008
55ed8ca1
JH
2009int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2010{
2011 struct link_key *key;
2012
2013 key = hci_find_link_key(hdev, bdaddr);
2014 if (!key)
2015 return -ENOENT;
2016
6ed93dc6 2017 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
2018
2019 list_del(&key->list);
2020 kfree(key);
2021
2022 return 0;
2023}
2024
b899efaf
VCG
2025int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2026{
2027 struct smp_ltk *k, *tmp;
2028
2029 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2030 if (bacmp(bdaddr, &k->bdaddr))
2031 continue;
2032
6ed93dc6 2033 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
2034
2035 list_del(&k->list);
2036 kfree(k);
2037 }
2038
2039 return 0;
2040}
2041
/* HCI command timer function: fires when the controller failed to answer
 * the last command in time. Logs the timed-out opcode (if the command is
 * still around), then forces the command credit back to 1 and kicks the
 * command work so the queue does not stall forever.
 */
static void hci_cmd_timeout(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        if (hdev->sent_cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);

                BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
        } else {
                BT_ERR("%s command tx timeout", hdev->name);
        }

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}
2059
2763eda6 2060struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 2061 bdaddr_t *bdaddr)
2763eda6
SJ
2062{
2063 struct oob_data *data;
2064
2065 list_for_each_entry(data, &hdev->remote_oob_data, list)
2066 if (bacmp(bdaddr, &data->bdaddr) == 0)
2067 return data;
2068
2069 return NULL;
2070}
2071
2072int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2073{
2074 struct oob_data *data;
2075
2076 data = hci_find_remote_oob_data(hdev, bdaddr);
2077 if (!data)
2078 return -ENOENT;
2079
6ed93dc6 2080 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
2081
2082 list_del(&data->list);
2083 kfree(data);
2084
2085 return 0;
2086}
2087
2088int hci_remote_oob_data_clear(struct hci_dev *hdev)
2089{
2090 struct oob_data *data, *n;
2091
2092 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2093 list_del(&data->list);
2094 kfree(data);
2095 }
2096
2097 return 0;
2098}
2099
2100int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 2101 u8 *randomizer)
2763eda6
SJ
2102{
2103 struct oob_data *data;
2104
2105 data = hci_find_remote_oob_data(hdev, bdaddr);
2106
2107 if (!data) {
2108 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2109 if (!data)
2110 return -ENOMEM;
2111
2112 bacpy(&data->bdaddr, bdaddr);
2113 list_add(&data->list, &hdev->remote_oob_data);
2114 }
2115
2116 memcpy(data->hash, hash, sizeof(data->hash));
2117 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2118
6ed93dc6 2119 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2120
2121 return 0;
2122}
2123
04124681 2124struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 2125{
8035ded4 2126 struct bdaddr_list *b;
b2a66aad 2127
8035ded4 2128 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
2129 if (bacmp(bdaddr, &b->bdaddr) == 0)
2130 return b;
b2a66aad
AJ
2131
2132 return NULL;
2133}
2134
2135int hci_blacklist_clear(struct hci_dev *hdev)
2136{
2137 struct list_head *p, *n;
2138
2139 list_for_each_safe(p, n, &hdev->blacklist) {
2140 struct bdaddr_list *b;
2141
2142 b = list_entry(p, struct bdaddr_list, list);
2143
2144 list_del(p);
2145 kfree(b);
2146 }
2147
2148 return 0;
2149}
2150
/* Add @bdaddr to the device blacklist and notify mgmt.
 *
 * Returns the mgmt_device_blocked() result on success, or -EBADF for
 * BDADDR_ANY, -EEXIST if already blacklisted, -ENOMEM on allocation
 * failure. Note: @type is only forwarded to mgmt, not stored here.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        /* The wildcard address cannot be blacklisted */
        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return -EBADF;

        if (hci_blacklist_lookup(hdev, bdaddr))
                return -EEXIST;

        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, bdaddr);

        list_add(&entry->list, &hdev->blacklist);

        return mgmt_device_blocked(hdev, bdaddr, type);
}
2171
88c1fe4b 2172int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2173{
2174 struct bdaddr_list *entry;
b2a66aad 2175
1ec918ce 2176 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 2177 return hci_blacklist_clear(hdev);
b2a66aad
AJ
2178
2179 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 2180 if (!entry)
5e762444 2181 return -ENOENT;
b2a66aad
AJ
2182
2183 list_del(&entry->list);
2184 kfree(entry);
2185
88c1fe4b 2186 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
2187}
2188
4c87eaab 2189static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2190{
4c87eaab
AG
2191 if (status) {
2192 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 2193
4c87eaab
AG
2194 hci_dev_lock(hdev);
2195 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2196 hci_dev_unlock(hdev);
2197 return;
2198 }
7ba8b4be
AG
2199}
2200
/* Request-complete callback run once LE scanning has been disabled.
 *
 * For a pure LE discovery the procedure ends here and discovery state
 * is reset. For interleaved discovery the BR/EDR inquiry phase is
 * started next using the General Inquiry Access Code. Other discovery
 * types need no action after the scan is disabled.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery is complete once scanning stops */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* LE phase done; switch to the BR/EDR inquiry phase */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Drop stale results before the inquiry phase starts */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2243
7ba8b4be
AG
2244static void le_scan_disable_work(struct work_struct *work)
2245{
2246 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 2247 le_scan_disable.work);
7ba8b4be 2248 struct hci_cp_le_set_scan_enable cp;
4c87eaab
AG
2249 struct hci_request req;
2250 int err;
7ba8b4be
AG
2251
2252 BT_DBG("%s", hdev->name);
2253
4c87eaab 2254 hci_req_init(&req, hdev);
28b75a89 2255
7ba8b4be 2256 memset(&cp, 0, sizeof(cp));
4c87eaab
AG
2257 cp.enable = LE_SCAN_DISABLE;
2258 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
28b75a89 2259
4c87eaab
AG
2260 err = hci_req_run(&req, le_scan_disable_work_complete);
2261 if (err)
2262 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
2263}
2264
9be0dab7
DH
/* Alloc HCI device.
 *
 * Allocates a zeroed struct hci_dev and installs conservative
 * defaults (mandatory packet types, one IAC, NoInputNoOutput IO
 * capability, invalid TX power placeholders, sniff and LE scan
 * parameters), then initializes all locks, lists, work items,
 * queues, the command timer, sysfs state and discovery state.
 * Returns the new device or NULL on allocation failure; release
 * with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline packet types every controller supports */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	/* Real TX power values are read from the controller during setup */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Default LE scan interval/window */
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Detects commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2322
2323/* Free HCI device */
2324void hci_free_dev(struct hci_dev *hdev)
2325{
9be0dab7
DH
2326 /* will free via device release */
2327 put_device(&hdev->dev);
2328}
2329EXPORT_SYMBOL(hci_free_dev);
2330
1da177e4
LT
/* Register HCI device.
 *
 * Allocates an index (AMP controllers never get index 0 so the index
 * doubles as AMP controller ID), creates the per-device workqueues,
 * registers sysfs and (best-effort) rfkill, adds the device to the
 * global list and queues asynchronous power-on. Returns the assigned
 * index on success or a negative errno; on failure all acquired
 * resources are unwound.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must provide at least open() and close() callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Ordered (max_active = 1), high-priority workqueue for RX/TX/cmd
	 * processing
	 */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	/* Separate ordered workqueue for blocking request work */
	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort: the device still works
	 * without it
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	/* Power on asynchronously from the request workqueue */
	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2423
/* Unregister HCI device.
 *
 * Tears down in the reverse order of registration: mark the device as
 * unregistering, unlink it from the global list, close it, flush
 * pending work and reassembly buffers, notify mgmt (only if userspace
 * already saw the index), drop rfkill/sysfs/workqueues, clear all
 * persistent state, drop the registration reference and finally
 * release the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Stop power-on/mgmt work from re-arming during teardown */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be gone after hci_dev_put() */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal if the index was ever visible to mgmt */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush all persistent device state under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2482
2483/* Suspend HCI device */
2484int hci_suspend_dev(struct hci_dev *hdev)
2485{
2486 hci_notify(hdev, HCI_DEV_SUSPEND);
2487 return 0;
2488}
2489EXPORT_SYMBOL(hci_suspend_dev);
2490
2491/* Resume HCI device */
2492int hci_resume_dev(struct hci_dev *hdev)
2493{
2494 hci_notify(hdev, HCI_DEV_RESUME);
2495 return 0;
2496}
2497EXPORT_SYMBOL(hci_resume_dev);
2498
76bca880 2499/* Receive frame from HCI drivers */
e1a26170 2500int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 2501{
76bca880 2502 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 2503 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
2504 kfree_skb(skb);
2505 return -ENXIO;
2506 }
2507
d82603c6 2508 /* Incoming skb */
76bca880
MH
2509 bt_cb(skb)->incoming = 1;
2510
2511 /* Time stamp */
2512 __net_timestamp(skb);
2513
76bca880 2514 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 2515 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 2516
76bca880
MH
2517 return 0;
2518}
2519EXPORT_SYMBOL(hci_recv_frame);
2520
/* Core packet reassembler.
 *
 * Appends up to @count bytes of @data to the partial packet stored in
 * hdev->reassembly[@index], allocating a fresh buffer sized for the
 * packet @type when none exists. Once enough header bytes are present
 * the expected payload length is read from the header; a complete
 * packet is handed to hci_recv_frame() and the slot is cleared.
 *
 * Returns the number of input bytes not yet consumed, -EILSEQ for an
 * invalid type/index, or -ENOMEM on allocation failure or when the
 * advertised payload exceeds the buffer.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* The range check guarantees the switch below covers every type */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the buffer for the worst
		 * case of this packet type and expect its header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* Track progress in the skb control block */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the full header has arrived, read the payload
		 * length from it and sanity-check it against tailroom.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2628
ef222013
MH
2629int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2630{
f39a3c06
SS
2631 int rem = 0;
2632
ef222013
MH
2633 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2634 return -EILSEQ;
2635
da5f6c37 2636 while (count) {
1e429f38 2637 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
2638 if (rem < 0)
2639 return rem;
ef222013 2640
f39a3c06
SS
2641 data += (count - rem);
2642 count = rem;
f81c6224 2643 }
ef222013 2644
f39a3c06 2645 return rem;
ef222013
MH
2646}
2647EXPORT_SYMBOL(hci_recv_fragment);
2648
99811510
SS
2649#define STREAM_REASSEMBLY 0
2650
2651int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2652{
2653 int type;
2654 int rem = 0;
2655
da5f6c37 2656 while (count) {
99811510
SS
2657 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2658
2659 if (!skb) {
2660 struct { char type; } *pkt;
2661
2662 /* Start of the frame */
2663 pkt = data;
2664 type = pkt->type;
2665
2666 data++;
2667 count--;
2668 } else
2669 type = bt_cb(skb)->pkt_type;
2670
1e429f38 2671 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 2672 STREAM_REASSEMBLY);
99811510
SS
2673 if (rem < 0)
2674 return rem;
2675
2676 data += (count - rem);
2677 count = rem;
f81c6224 2678 }
99811510
SS
2679
2680 return rem;
2681}
2682EXPORT_SYMBOL(hci_recv_stream_fragment);
2683
1da177e4
LT
2684/* ---- Interface to upper protocols ---- */
2685
1da177e4
LT
2686int hci_register_cb(struct hci_cb *cb)
2687{
2688 BT_DBG("%p name %s", cb, cb->name);
2689
f20d09d5 2690 write_lock(&hci_cb_list_lock);
1da177e4 2691 list_add(&cb->list, &hci_cb_list);
f20d09d5 2692 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2693
2694 return 0;
2695}
2696EXPORT_SYMBOL(hci_register_cb);
2697
2698int hci_unregister_cb(struct hci_cb *cb)
2699{
2700 BT_DBG("%p name %s", cb, cb->name);
2701
f20d09d5 2702 write_lock(&hci_cb_list_lock);
1da177e4 2703 list_del(&cb->list);
f20d09d5 2704 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2705
2706 return 0;
2707}
2708EXPORT_SYMBOL(hci_unregister_cb);
2709
/* Hand one outgoing frame to the driver.
 *
 * The frame is time-stamped, a copy goes to the monitor channel and
 * (in promiscuous mode) to raw sockets, then the skb is orphaned and
 * passed to the driver's send callback. Ordering matters: copies must
 * be taken before the driver consumes the skb.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
2731
3119ae95
JH
2732void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2733{
2734 skb_queue_head_init(&req->cmd_q);
2735 req->hdev = hdev;
5d73e034 2736 req->err = 0;
3119ae95
JH
2737}
2738
/* Submit a built request.
 *
 * Attaches @complete to the last queued command, splices the request's
 * command queue onto the device command queue under its lock and kicks
 * the command worker. Returns the recorded build error (purging the
 * queue) if any hci_req_add*() failed, -ENODATA for an empty request,
 * otherwise 0.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occured during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback travels on the request's final command */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2770
1ca3a9d0 2771static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 2772 u32 plen, const void *param)
1da177e4
LT
2773{
2774 int len = HCI_COMMAND_HDR_SIZE + plen;
2775 struct hci_command_hdr *hdr;
2776 struct sk_buff *skb;
2777
1da177e4 2778 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
2779 if (!skb)
2780 return NULL;
1da177e4
LT
2781
2782 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2783 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
2784 hdr->plen = plen;
2785
2786 if (plen)
2787 memcpy(skb_put(skb, plen), param, plen);
2788
2789 BT_DBG("skb len %d", skb->len);
2790
0d48d939 2791 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 2792
1ca3a9d0
JH
2793 return skb;
2794}
2795
2796/* Send HCI command */
07dc93dd
JH
2797int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2798 const void *param)
1ca3a9d0
JH
2799{
2800 struct sk_buff *skb;
2801
2802 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2803
2804 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2805 if (!skb) {
2806 BT_ERR("%s no memory for command", hdev->name);
2807 return -ENOMEM;
2808 }
2809
11714b3d
JH
2810 /* Stand-alone HCI commands must be flaged as
2811 * single-command requests.
2812 */
2813 bt_cb(skb)->req.start = true;
2814
1da177e4 2815 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 2816 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2817
2818 return 0;
2819}
1da177e4 2820
/* Queue a command to an asynchronous HCI request.
 *
 * @event carries the event opcode the command is expected to complete
 * with (0 for the default). Errors are latched in req->err so the
 * final hci_req_run() can fail the whole request.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occured during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command of a request marks the request boundary */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
2851
07dc93dd
JH
2852void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2853 const void *param)
02350a72
JH
2854{
2855 hci_req_add_ev(req, opcode, plen, param, 0);
2856}
2857
1da177e4 2858/* Get data from the previously sent command */
a9de9248 2859void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2860{
2861 struct hci_command_hdr *hdr;
2862
2863 if (!hdev->sent_cmd)
2864 return NULL;
2865
2866 hdr = (void *) hdev->sent_cmd->data;
2867
a9de9248 2868 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2869 return NULL;
2870
f0e09510 2871 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2872
2873 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2874}
2875
2876/* Send ACL data */
2877static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2878{
2879 struct hci_acl_hdr *hdr;
2880 int len = skb->len;
2881
badff6d0
ACM
2882 skb_push(skb, HCI_ACL_HDR_SIZE);
2883 skb_reset_transport_header(skb);
9c70220b 2884 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2885 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2886 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2887}
2888
/* Queue an ACL frame (possibly split into a fragment list) on @queue.
 *
 * The connection handle is used for BR/EDR links and the channel
 * handle for AMP links. For fragmented skbs the head keeps the caller
 * supplied flags while all continuation fragments are re-flagged as
 * ACL_CONT; all fragments are queued atomically under the queue lock.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; the fragments
	 * travel on frag_list
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments never restart an ACL PDU */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2946
2947void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2948{
ee22be7e 2949 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 2950
f0e09510 2951 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 2952
ee22be7e 2953 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 2954
3eff45ea 2955 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2956}
1da177e4
LT
2957
2958/* Send SCO data */
0d861d8b 2959void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2960{
2961 struct hci_dev *hdev = conn->hdev;
2962 struct hci_sco_hdr hdr;
2963
2964 BT_DBG("%s len %d", hdev->name, skb->len);
2965
aca3192c 2966 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2967 hdr.dlen = skb->len;
2968
badff6d0
ACM
2969 skb_push(skb, HCI_SCO_HDR_SIZE);
2970 skb_reset_transport_header(skb);
9c70220b 2971 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 2972
0d48d939 2973 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2974
1da177e4 2975 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2976 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2977}
1da177e4
LT
2978
2979/* ---- HCI TX task (outgoing data) ---- */
2980
2981/* HCI Connection scheduler */
/* HCI Connection scheduler.
 *
 * Pick the @type connection with queued data and the fewest unacked
 * packets (simple fairness), and compute its transmit *quote as an
 * equal share of the free controller buffers (minimum one).
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop once all connections of this type were inspected */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Buffer pool depends on the link type; LE may share
		 * the ACL pool when no dedicated LE buffers exist
		 */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3041
6039aa73 3042static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3043{
3044 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3045 struct hci_conn *c;
1da177e4 3046
bae1f5d9 3047 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 3048
bf4c6325
GP
3049 rcu_read_lock();
3050
1da177e4 3051 /* Kill stalled connections */
bf4c6325 3052 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3053 if (c->type == type && c->sent) {
6ed93dc6
AE
3054 BT_ERR("%s killing stalled connection %pMR",
3055 hdev->name, &c->dst);
bed71748 3056 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3057 }
3058 }
bf4c6325
GP
3059
3060 rcu_read_unlock();
1da177e4
LT
3061}
3062
/* Channel-level scheduler.
 *
 * Among all @type connections, select the channel whose head-of-queue
 * skb has the highest priority; ties are broken by picking the
 * connection with the fewest unacked packets. Computes a fair
 * transmit *quote from the free buffer count of the link type.
 * Returns NULL when nothing is ready to send.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this priority level
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities, pick the least-busy
			 * connection
			 */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Select the buffer pool matching the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3144
/* Anti-starvation pass over all @type connections.
 *
 * Channels that transmitted in the last scheduling round get their
 * sent counter reset; channels that were skipped get the priority of
 * their head-of-queue skb promoted (up to HCI_PRIO_MAX - 1) so they
 * will win a future round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send: reset its round counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: bump it to the top priority */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3194
b71d385a
AE
3195static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3196{
3197 /* Calculate count of blocks used by this packet */
3198 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3199}
3200
6039aa73 3201static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3202{
1da177e4
LT
3203 if (!test_bit(HCI_RAW, &hdev->flags)) {
3204 /* ACL tx timeout must be longer than maximum
3205 * link supervision timeout (40.9 seconds) */
63d2bc1b 3206 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3207 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3208 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3209 }
63d2bc1b 3210}
1da177e4 3211
/* Packet-based ACL scheduler.
 *
 * While controller ACL buffers remain, repeatedly pick the best
 * channel via hci_chan_sent() and drain up to its quota of frames,
 * stopping early if a lower-priority frame reaches the queue head.
 * Updates per-channel/connection sent counters and finishes with an
 * anti-starvation priority recalculation when anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Only recalculate priorities if something was actually sent */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3249
/* Block-based ACL scheduler (buffer accounting in blocks, used for
 * AMP controllers and block-based flow control).
 *
 * Like hci_sched_acl_pkt() but each frame consumes __get_blocks()
 * buffer blocks; a frame larger than the remaining block budget aborts
 * the scheduling pass entirely.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP links, BR/EDR ones ACL links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Frame does not fit in the remaining budget:
			 * give up this pass
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3303
6039aa73 3304static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3305{
3306 BT_DBG("%s", hdev->name);
3307
bd1eb66b
AE
3308 /* No ACL link over BR/EDR controller */
3309 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3310 return;
3311
3312 /* No AMP link over AMP controller */
3313 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3314 return;
3315
3316 switch (hdev->flow_ctl_mode) {
3317 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3318 hci_sched_acl_pkt(hdev);
3319 break;
3320
3321 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3322 hci_sched_acl_blk(hdev);
3323 break;
3324 }
3325}
3326
1da177e4 3327/* Schedule SCO */
6039aa73 3328static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3329{
3330 struct hci_conn *conn;
3331 struct sk_buff *skb;
3332 int quote;
3333
3334 BT_DBG("%s", hdev->name);
3335
52087a79
LAD
3336 if (!hci_conn_num(hdev, SCO_LINK))
3337 return;
3338
1da177e4
LT
3339 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3340 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3341 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 3342 hci_send_frame(hdev, skb);
1da177e4
LT
3343
3344 conn->sent++;
3345 if (conn->sent == ~0)
3346 conn->sent = 0;
3347 }
3348 }
3349}
3350
6039aa73 3351static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3352{
3353 struct hci_conn *conn;
3354 struct sk_buff *skb;
3355 int quote;
3356
3357 BT_DBG("%s", hdev->name);
3358
52087a79
LAD
3359 if (!hci_conn_num(hdev, ESCO_LINK))
3360 return;
3361
8fc9ced3
GP
3362 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3363 &quote))) {
b6a0dc82
MH
3364 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3365 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 3366 hci_send_frame(hdev, skb);
b6a0dc82
MH
3367
3368 conn->sent++;
3369 if (conn->sent == ~0)
3370 conn->sent = 0;
3371 }
3372 }
3373}
3374
/* LE scheduler. Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) borrow credits from the ACL pool, which is why the
 * final count is written back to either le_cnt or acl_cnt.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the LE credit pool if the controller has one, otherwise
	 * share the ACL pool.
	 */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credit back to whichever pool it came
	 * from.
	 */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3425
3eff45ea 3426static void hci_tx_work(struct work_struct *work)
1da177e4 3427{
3eff45ea 3428 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3429 struct sk_buff *skb;
3430
6ed58ec5 3431 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3432 hdev->sco_cnt, hdev->le_cnt);
1da177e4 3433
52de599e
MH
3434 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3435 /* Schedule queues and send stuff to HCI driver */
3436 hci_sched_acl(hdev);
3437 hci_sched_sco(hdev);
3438 hci_sched_esco(hdev);
3439 hci_sched_le(hdev);
3440 }
6ed58ec5 3441
1da177e4
LT
3442 /* Send next queued raw (unknown type) packet */
3443 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 3444 hci_send_frame(hdev, skb);
1da177e4
LT
3445}
3446
25985edc 3447/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3448
3449/* ACL data packet */
6039aa73 3450static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3451{
3452 struct hci_acl_hdr *hdr = (void *) skb->data;
3453 struct hci_conn *conn;
3454 __u16 handle, flags;
3455
3456 skb_pull(skb, HCI_ACL_HDR_SIZE);
3457
3458 handle = __le16_to_cpu(hdr->handle);
3459 flags = hci_flags(handle);
3460 handle = hci_handle(handle);
3461
f0e09510 3462 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 3463 handle, flags);
1da177e4
LT
3464
3465 hdev->stat.acl_rx++;
3466
3467 hci_dev_lock(hdev);
3468 conn = hci_conn_hash_lookup_handle(hdev, handle);
3469 hci_dev_unlock(hdev);
8e87d142 3470
1da177e4 3471 if (conn) {
65983fc7 3472 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 3473
1da177e4 3474 /* Send to upper protocol */
686ebf28
UF
3475 l2cap_recv_acldata(conn, skb, flags);
3476 return;
1da177e4 3477 } else {
8e87d142 3478 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 3479 hdev->name, handle);
1da177e4
LT
3480 }
3481
3482 kfree_skb(skb);
3483}
3484
3485/* SCO data packet */
6039aa73 3486static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3487{
3488 struct hci_sco_hdr *hdr = (void *) skb->data;
3489 struct hci_conn *conn;
3490 __u16 handle;
3491
3492 skb_pull(skb, HCI_SCO_HDR_SIZE);
3493
3494 handle = __le16_to_cpu(hdr->handle);
3495
f0e09510 3496 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
3497
3498 hdev->stat.sco_rx++;
3499
3500 hci_dev_lock(hdev);
3501 conn = hci_conn_hash_lookup_handle(hdev, handle);
3502 hci_dev_unlock(hdev);
3503
3504 if (conn) {
1da177e4 3505 /* Send to upper protocol */
686ebf28
UF
3506 sco_recv_scodata(conn, skb);
3507 return;
1da177e4 3508 } else {
8e87d142 3509 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 3510 hdev->name, handle);
1da177e4
LT
3511 }
3512
3513 kfree_skb(skb);
3514}
3515
9238f36a
JH
3516static bool hci_req_is_complete(struct hci_dev *hdev)
3517{
3518 struct sk_buff *skb;
3519
3520 skb = skb_peek(&hdev->cmd_q);
3521 if (!skb)
3522 return true;
3523
3524 return bt_cb(skb)->req.start;
3525}
3526
42c6b129
JH
3527static void hci_resend_last(struct hci_dev *hdev)
3528{
3529 struct hci_command_hdr *sent;
3530 struct sk_buff *skb;
3531 u16 opcode;
3532
3533 if (!hdev->sent_cmd)
3534 return;
3535
3536 sent = (void *) hdev->sent_cmd->data;
3537 opcode = __le16_to_cpu(sent->opcode);
3538 if (opcode == HCI_OP_RESET)
3539 return;
3540
3541 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3542 if (!skb)
3543 return;
3544
3545 skb_queue_head(&hdev->cmd_q, skb);
3546 queue_work(hdev->workqueue, &hdev->cmd_work);
3547}
3548
/* Called when a command completes. Decides whether the request the
 * command belonged to is finished and, if so, runs the request's
 * completion callback exactly once. On mismatched opcodes it handles
 * the CSR spontaneous-reset quirk instead.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request; stop
	 * at (and requeue) the frame that starts the next request.
	 * The queue lock is taken IRQ-safe since the queue is also
	 * touched from other contexts.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* The last dropped frame's callback wins; any frame of
		 * the request may carry it.
		 */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3614
/* RX worker: drains the receive queue, mirroring each frame to the
 * monitor (and promiscuous sockets), filtering frames the stack must
 * not process, and finally dispatching by packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode or with an exclusive user channel the
		 * kernel stack must not consume the frame.
		 */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states;
			 * events still go through during init.
			 */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
3670
c347b765 3671static void hci_cmd_work(struct work_struct *work)
1da177e4 3672{
c347b765 3673 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
3674 struct sk_buff *skb;
3675
2104786b
AE
3676 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3677 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 3678
1da177e4 3679 /* Send queued commands */
5a08ecce
AE
3680 if (atomic_read(&hdev->cmd_cnt)) {
3681 skb = skb_dequeue(&hdev->cmd_q);
3682 if (!skb)
3683 return;
3684
7585b97a 3685 kfree_skb(hdev->sent_cmd);
1da177e4 3686
a675d7f1 3687 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 3688 if (hdev->sent_cmd) {
1da177e4 3689 atomic_dec(&hdev->cmd_cnt);
57d17d70 3690 hci_send_frame(hdev, skb);
7bdb8a5c
SJ
3691 if (test_bit(HCI_RESET, &hdev->flags))
3692 del_timer(&hdev->cmd_timer);
3693 else
3694 mod_timer(&hdev->cmd_timer,
5f246e89 3695 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
3696 } else {
3697 skb_queue_head(&hdev->cmd_q, skb);
c347b765 3698 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3699 }
3700 }
3701}