/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	hci_req_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}
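
/* Illustration (not part of the original file): with these entries
 * registered, userspace can toggle Device Under Test mode at runtime,
 * typically via "echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode"
 * (the exact debugfs mount point may differ per system).
 */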

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
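
/* Note (added for clarity): skb_get() above takes an extra reference on
 * the status skb, so the waiter in __hci_cmd_sync_ev() below owns
 * hdev->req_skb after wake-up and is responsible for freeing it.
 */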

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
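
/* Example (illustrative, mirroring dut_mode_write() above): a driver can
 * issue a synchronous HCI Reset and wait for its completion like this:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 */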

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
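
/* Example (illustrative): callers pass a request-builder callback plus an
 * opaque argument, as hci_inquiry() below does:
 *
 *	err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir, timeo);
 */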

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Block Size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
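
/* Note (added for clarity): the 8-byte mask is a little-endian bitfield,
 * so events[n] bit b corresponds to event-mask bit (n * 8 + b) in the
 * Bluetooth Core Specification; e.g. events[5] |= 0x20 sets bit 45,
 * the Sniff Subrating event.
 */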

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40; /* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04; /* LE Direct Advertising
					    * Report
					    */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80; /* LE Read Local P-256
					    * Public Key Complete
					    */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01; /* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
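
/* Note (added for clarity): the reference taken by hci_dev_hold() here
 * must be balanced by the caller, as done throughout this file:
 *
 *	hdev = hci_dev_get(dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */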

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
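
/* Note (added for clarity): the returned flags feed the mgmt Device Found
 * event; MGMT_DEV_FOUND_CONFIRM_NAME asks userspace to confirm the remote
 * name (the cache could not resolve it), while
 * MGMT_DEV_FOUND_LEGACY_PAIRING marks devices without SSP support.
 */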

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
1567
cbed0ca1
JH
1568/* ---- HCI ioctl helpers ---- */
1569
1570int hci_dev_open(__u16 dev)
1571{
1572 struct hci_dev *hdev;
1573 int err;
1574
1575 hdev = hci_dev_get(dev);
1576 if (!hdev)
1577 return -ENODEV;
1578
4a964404 1579 /* Devices that are marked as unconfigured can only be powered
fee746b0
MH
1580 * up as user channel. Trying to bring them up as normal devices
1581 * will result into a failure. Only user channel operation is
1582 * possible.
1583 *
1584 * When this function is called for a user channel, the flag
1585 * HCI_USER_CHANNEL will be set first before attempting to
1586 * open the device.
1587 */
d7a5a11d
MH
1588 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1589 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
fee746b0
MH
1590 err = -EOPNOTSUPP;
1591 goto done;
1592 }
1593
e1d08f40
JH
1594 /* We need to ensure that no other power on/off work is pending
1595 * before proceeding to call hci_dev_do_open. This is
1596 * particularly important if the setup procedure has not yet
1597 * completed.
1598 */
a69d8927 1599 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
e1d08f40
JH
1600 cancel_delayed_work(&hdev->power_off);
1601
a5c8f270
MH
1602 /* After this call it is guaranteed that the setup procedure
1603 * has finished. This means that error conditions like RFKILL
1604 * or no valid public or static random address apply.
1605 */
e1d08f40
JH
1606 flush_workqueue(hdev->req_workqueue);
1607
12aa4f0a 1608 /* For controllers not using the management interface and that
b6ae8457 1609 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
12aa4f0a
MH
1610 * so that pairing works for them. Once the management interface
1611 * is in use this bit will be cleared again and userspace has
1612 * to explicitly enable it.
1613 */
d7a5a11d
MH
1614 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1615 !hci_dev_test_flag(hdev, HCI_MGMT))
a1536da2 1616 hci_dev_set_flag(hdev, HCI_BONDABLE);
12aa4f0a 1617
cbed0ca1
JH
1618 err = hci_dev_do_open(hdev);
1619
fee746b0 1620done:
cbed0ca1 1621 hci_dev_put(hdev);
cbed0ca1
JH
1622 return err;
1623}
1624
d7347f3c
JH
1625/* This function requires the caller holds hdev->lock */
1626static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1627{
1628 struct hci_conn_params *p;
1629
f161dd41
JH
1630 list_for_each_entry(p, &hdev->le_conn_params, list) {
1631 if (p->conn) {
1632 hci_conn_drop(p->conn);
f8aaf9b6 1633 hci_conn_put(p->conn);
f161dd41
JH
1634 p->conn = NULL;
1635 }
d7347f3c 1636 list_del_init(&p->action);
f161dd41 1637 }
d7347f3c
JH
1638
1639 BT_DBG("All LE pending actions cleared");
1640}
1641
6b3cc1db 1642int hci_dev_do_close(struct hci_dev *hdev)
1da177e4 1643{
acc649c6
MH
1644 bool auto_off;
1645
1da177e4
LT
1646 BT_DBG("%s %p", hdev->name, hdev);
1647
d24d8144 1648 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
867146a0 1649 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
d24d8144 1650 test_bit(HCI_UP, &hdev->flags)) {
a44fecbd
THJA
1651 /* Execute vendor specific shutdown routine */
1652 if (hdev->shutdown)
1653 hdev->shutdown(hdev);
1654 }
1655
78c04c0b
VCG
1656 cancel_delayed_work(&hdev->power_off);
1657
1da177e4
LT
1658 hci_req_cancel(hdev, ENODEV);
1659 hci_req_lock(hdev);
1660
1661 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 1662 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev, 0);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
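
/* Usage sketch (illustrative, not part of the build): hci_dev_close() is
 * the handler behind the HCIDEVDOWN ioctl on an HCI control socket. A
 * hypothetical user-space caller bringing down hci0 would do roughly:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	if (ioctl(ctl, HCIDEVDOWN, 0) < 0)
 *		perror("HCIDEVDOWN");
 *
 * The -EBUSY above surfaces when the device is held open as a user
 * channel.
 */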

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
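
/* Usage sketch (illustrative): these commands arrive from user space via
 * ioctl() on an HCI socket. A hypothetical snippet enabling page and
 * inquiry scan on device 0 through HCISETSCAN:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */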

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
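
/* Usage sketch (illustrative): user space retrieves this list with the
 * HCIGETDEVLIST ioctl, pre-setting dev_num to the number of slots in the
 * buffer it passes down:
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = calloc(1, sizeof(*dl) +
 *		       HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *	if (ioctl(ctl, HCIGETDEVLIST, (unsigned long) dl) == 0)
 *		...dl->dev_num now holds the number of filled entries...
 */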

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

static void hci_adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, adv_instance_expire.work);

	BT_DBG("%s", hdev->name);

	mgmt_adv_timeout_expired(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
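
/* Worked example for the rules above (a hedged summary, not new policy):
 * a legacy key (type below 0x03) is always stored, a debug combination
 * key (0x03) never is, and an ordinary combination key created during
 * pairing is stored only when the bonding checks pass, e.g. when both
 * auth_type and remote_auth indicate dedicated (0x02/0x03) or general
 * (0x04/0x05) bonding.
 */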

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
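
/* Usage sketch (illustrative): the Link Key Notification event handler
 * is the typical caller and uses the persistent flag to decide what to
 * tell mgmt, roughly:
 *
 *	bool persistent;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, conn->pin_length, &persistent);
 *	if (key && hci_dev_test_flag(hdev, HCI_MGMT))
 *		mgmt_new_link_key(hdev, key, persistent);
 */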

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
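
/* Note: the data->present values assigned above encode which OOB pairs
 * are valid: 0x01 means P-192 (hash192/rand192) only, 0x02 means P-256
 * (hash256/rand256) only and 0x03 means both pairs are present.
 */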

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
}

/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
	else
		adv_instance->duration = duration;

	BT_DBG("%s for %d", hdev->name, instance);

	return 0;
}
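
/* Usage sketch (illustrative): mgmt's Add Advertising path is the
 * typical caller. With hdev->lock held, a hypothetical caller could
 * register instance 1 with no expiry timeout and the default rotation
 * duration:
 *
 *	err = hci_add_adv_instance(hdev, 1, flags,
 *				   adv_len, adv_data,
 *				   scan_rsp_len, scan_rsp_data,
 *				   0, 0);
 *
 * A zero timeout means the instance never expires and a zero duration
 * falls back to HCI_DEFAULT_ADV_DURATION, as handled above.
 */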

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}
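
/* Usage sketch (illustrative): the same helper serves every bdaddr list
 * in the core, e.g. adding a device to the connection white list with
 * hdev->lock held:
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, addr_type);
 *	if (err == -EEXIST)
 *		...the entry was already present...
 */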

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
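
/* Usage sketch (illustrative): mgmt and the LE connection code use this
 * helper to materialize per-device parameters before adjusting them,
 * with hdev->lock held:
 *
 *	params = hci_conn_params_add(hdev, &addr->bdaddr, addr_type);
 *	if (!params)
 *		return -ENOMEM;
 *
 *	params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 */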

static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish a one time connection to a
		 * disabled device, leave the params, but mark them as
		 * just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running LE only scan, change discovery
			 * state. If we were running both LE and BR/EDR
			 * inquiry simultaneously, and BR/EDR inquiry is
			 * already finished, stop discovery, otherwise
			 * BR/EDR inquiry will stop discovery when finished.
			 * If we will resolve remote device name, do not
			 * change discovery state.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
			    hdev->discovery.state != DISCOVERY_RESOLVING)
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	cancel_delayed_work_sync(&hdev->le_scan_restart);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	unsigned long timeout, duration, scan_start, now;

	BT_DBG("%s", hdev->name);

	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		return;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}
	queue_delayed_work(hdev->workqueue,
			   &hdev->le_scan_disable, timeout);
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_restart_work_complete);
	if (err)
		BT_ERR("Restart LE scan request failed: err %d", err);
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
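
/* Usage sketch (illustrative): advertising and connection setup code
 * calls this to pick the own-address used on the air:
 *
 *	bdaddr_t bdaddr;
 *	u8 bdaddr_type;
 *
 *	hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
 */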

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
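
/* Usage sketch (illustrative): a minimal transport driver pairs
 * hci_alloc_dev()/hci_register_dev() in its probe path and undoes both
 * on failure or removal. The my_* callbacks are hypothetical:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->flush = my_flush;
 *	hdev->send  = my_send;
 *	hci_set_drvdata(hdev, priv);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */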

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
	    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
	    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
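
/* Usage sketch (illustrative): a driver's receive completion path tags
 * each skb with its packet type before handing it to the core:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 *
 * Note that the core consumes the skb even on error, so the driver must
 * not free or touch it afterwards.
 */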
3563
e875ff84
MH
3564/* Receive diagnostic message from HCI drivers */
3565int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3566{
581d6fd6
MH
3567 /* Mark as diagnostic packet */
3568 bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3569
e875ff84
MH
3570 /* Time stamp */
3571 __net_timestamp(skb);
3572
581d6fd6
MH
3573 skb_queue_tail(&hdev->rx_q, skb);
3574 queue_work(hdev->workqueue, &hdev->rx_work);
e875ff84 3575
e875ff84
MH
3576 return 0;
3577}
3578EXPORT_SYMBOL(hci_recv_diag);
3579
1da177e4
LT
3580/* ---- Interface to upper protocols ---- */
3581
1da177e4
LT
3582int hci_register_cb(struct hci_cb *cb)
3583{
3584 BT_DBG("%p name %s", cb, cb->name);
3585
fba7ecf0 3586 mutex_lock(&hci_cb_list_lock);
00629e0f 3587 list_add_tail(&cb->list, &hci_cb_list);
fba7ecf0 3588 mutex_unlock(&hci_cb_list_lock);
1da177e4 3589
3590 return 0;
3591}
3592EXPORT_SYMBOL(hci_register_cb);
3593
3594int hci_unregister_cb(struct hci_cb *cb)
3595{
3596 BT_DBG("%p name %s", cb, cb->name);
3597
fba7ecf0 3598 mutex_lock(&hci_cb_list_lock);
1da177e4 3599 list_del(&cb->list);
fba7ecf0 3600 mutex_unlock(&hci_cb_list_lock);
1da177e4 3601
3602 return 0;
3603}
3604EXPORT_SYMBOL(hci_unregister_cb);
3605
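/* Editorial note: a hedged sketch of how an upper protocol hooks in.
 * The callback shown is one of several in struct hci_cb (see
 * <net/bluetooth/hci_core.h>); "foo" is a hypothetical protocol.
 *
 *	static void foo_security_cfm(struct hci_conn *conn, __u8 status,
 *				     __u8 encrypt)
 *	{
 *		...react to an encryption change on the link...
 *	}
 *
 *	static struct hci_cb foo_cb = {
 *		.name		= "foo",
 *		.security_cfm	= foo_security_cfm,
 *	};
 *
 * The protocol calls hci_register_cb(&foo_cb) on init and
 * hci_unregister_cb(&foo_cb) on exit; the mutex above makes both safe
 * against concurrent registration.
 */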
51086991 3606static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3607{
cdc52faa 3608 int err;
3609
0d48d939 3610 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 3611
cd82e61c 3612 /* Time stamp */
3613 __net_timestamp(skb);
1da177e4 3614
cd82e61c 3615 /* Send copy to monitor */
3616 hci_send_to_monitor(hdev, skb);
3617
3618 if (atomic_read(&hdev->promisc)) {
3619 /* Send copy to the sockets */
470fe1b5 3620 hci_send_to_sock(hdev, skb);
1da177e4 3621 }
3622
3623 /* Get rid of skb owner, prior to sending to the driver. */
3624 skb_orphan(skb);
3625
73d0d3c8 3626 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3627 kfree_skb(skb);
3628 return;
3629 }
3630
cdc52faa 3631 err = hdev->send(hdev, skb);
3632 if (err < 0) {
3633 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3634 kfree_skb(skb);
3635 }
1da177e4 3636}
3637
1ca3a9d0 3638/* Send HCI command */
07dc93dd 3639int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3640 const void *param)
1ca3a9d0 3641{
3642 struct sk_buff *skb;
3643
3644 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3645
3646 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3647 if (!skb) {
3648 BT_ERR("%s no memory for command", hdev->name);
3649 return -ENOMEM;
3650 }
3651
49c922bb 3652 /* Stand-alone HCI commands must be flagged as
11714b3d 3653 * single-command requests.
3654 */
242c0ebd 3655 bt_cb(skb)->hci.req_start = true;
11714b3d 3656
1da177e4 3657 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3658 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4 3659
3660 return 0;
3661}
1da177e4 3662
3663/* Get data from the previously sent command */
a9de9248 3664void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4 3665{
3666 struct hci_command_hdr *hdr;
3667
3668 if (!hdev->sent_cmd)
3669 return NULL;
3670
3671 hdr = (void *) hdev->sent_cmd->data;
3672
a9de9248 3673 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4 3674 return NULL;
3675
f0e09510 3676 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4 3677
3678 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3679}
3680
fbef168f 3681/* Send HCI command and wait for command complete event */
3682struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3683 const void *param, u32 timeout)
3684{
3685 struct sk_buff *skb;
3686
3687 if (!test_bit(HCI_UP, &hdev->flags))
3688 return ERR_PTR(-ENETDOWN);
3689
3690 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3691
3692 hci_req_lock(hdev);
3693 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3694 hci_req_unlock(hdev);
3695
3696 return skb;
3697}
3698EXPORT_SYMBOL(hci_cmd_sync);
3699
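/* Editorial example (a sketch, not from the original file): synchronous
 * use as a driver might do after init. HCI_OP_READ_LOCAL_VERSION,
 * struct hci_rp_read_local_version and HCI_CMD_TIMEOUT are the regular
 * definitions from the Bluetooth headers; error handling is abbreviated.
 *
 *	struct hci_rp_read_local_version *ver;
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	ver = (struct hci_rp_read_local_version *)skb->data;
 *	BT_DBG("hci_ver %u manufacturer %u", ver->hci_ver,
 *	       __le16_to_cpu(ver->manufacturer));
 *	kfree_skb(skb);
 */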
1da177e4 3700/* Send ACL data */
3701static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3702{
3703 struct hci_acl_hdr *hdr;
3704 int len = skb->len;
3705
badff6d0 3706 skb_push(skb, HCI_ACL_HDR_SIZE);
3707 skb_reset_transport_header(skb);
9c70220b 3708 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c 3709 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3710 hdr->dlen = cpu_to_le16(len);
1da177e4 3711}
3712
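/* Editorial worked example: hci_handle_pack() folds the 4-bit packet
 * boundary/broadcast flags into the top nibble of the 12-bit connection
 * handle. For handle 0x002a with flags ACL_START (0x02):
 *
 *	0x002a | (0x02 << 12) == 0x202a
 *
 * and that value is stored little-endian in hdr->handle above.
 */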
ee22be7e 3713static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3714 struct sk_buff *skb, __u16 flags)
1da177e4 3715{
ee22be7e 3716 struct hci_conn *conn = chan->conn;
1da177e4 3717 struct hci_dev *hdev = conn->hdev;
3718 struct sk_buff *list;
3719
087bfd99 3720 skb->len = skb_headlen(skb);
3721 skb->data_len = 0;
3722
3723 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54 3724
3725 switch (hdev->dev_type) {
3726 case HCI_BREDR:
3727 hci_add_acl_hdr(skb, conn->handle, flags);
3728 break;
3729 case HCI_AMP:
3730 hci_add_acl_hdr(skb, chan->handle, flags);
3731 break;
3732 default:
3733 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3734 return;
3735 }
087bfd99 3736
70f23020 3737 list = skb_shinfo(skb)->frag_list;
3738 if (!list) {
1da177e4 3739 /* Non-fragmented */
3740 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3741
73d80deb 3742 skb_queue_tail(queue, skb);
1da177e4 3743 } else {
3744 /* Fragmented */
3745 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3746
3747 skb_shinfo(skb)->frag_list = NULL;
3748
9cfd5a23 3749 /* Queue all fragments atomically. We need to use spin_lock_bh
3750 * here because of 6LoWPAN links, as there this function is
3751 * called from softirq and using normal spin lock could cause
3752 * deadlocks.
3753 */
3754 spin_lock_bh(&queue->lock);
1da177e4 3755
73d80deb 3756 __skb_queue_tail(queue, skb);
e702112f 3757
3758 flags &= ~ACL_START;
3759 flags |= ACL_CONT;
1da177e4 3760 do {
3761 skb = list; list = list->next;
8e87d142 3762
0d48d939 3763 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 3764 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4 3765
3766 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3767
73d80deb 3768 __skb_queue_tail(queue, skb);
1da177e4 3769 } while (list);
3770
9cfd5a23 3771 spin_unlock_bh(&queue->lock);
1da177e4 3772 }
73d80deb 3773}
3774
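/* Editorial note on the fragmentation above: the first fragment keeps
 * the caller's flags (typically ACL_START) while every skb on frag_list
 * is re-tagged ACL_CONT, so the controller sees one start packet
 * followed by continuation packets on the same connection handle.
 */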
3775void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3776{
ee22be7e 3777 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3778
f0e09510 3779 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3780
ee22be7e 3781 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3782
3eff45ea 3783 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3784}
1da177e4 3785
3786/* Send SCO data */
0d861d8b 3787void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4 3788{
3789 struct hci_dev *hdev = conn->hdev;
3790 struct hci_sco_hdr hdr;
3791
3792 BT_DBG("%s len %d", hdev->name, skb->len);
3793
aca3192c 3794 hdr.handle = cpu_to_le16(conn->handle);
1da177e4 3795 hdr.dlen = skb->len;
3796
badff6d0 3797 skb_push(skb, HCI_SCO_HDR_SIZE);
3798 skb_reset_transport_header(skb);
9c70220b 3799 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3800
0d48d939 3801 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 3802
1da177e4 3803 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3804 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3805}
1da177e4 3806
3807/* ---- HCI TX task (outgoing data) ---- */
3808
3809/* HCI Connection scheduler */
6039aa73 3810static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3811 int *quote)
1da177e4 3812{
3813 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3814 struct hci_conn *conn = NULL, *c;
abc5de8f 3815 unsigned int num = 0, min = ~0;
1da177e4 3816
8e87d142 3817 /* We don't have to lock device here. Connections are always
1da177e4 3818 * added and removed with TX task disabled. */
bf4c6325 3819
3820 rcu_read_lock();
3821
3822 list_for_each_entry_rcu(c, &h->list, list) {
769be974 3823 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 3824 continue;
769be974 3825
3826 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3827 continue;
3828
1da177e4 3829 num++;
3830
3831 if (c->sent < min) {
3832 min = c->sent;
3833 conn = c;
3834 }
52087a79 3835
3836 if (hci_conn_num(hdev, type) == num)
3837 break;
1da177e4 3838 }
3839
bf4c6325 3840 rcu_read_unlock();
3841
1da177e4 3842 if (conn) {
6ed58ec5 3843 int cnt, q;
3844
3845 switch (conn->type) {
3846 case ACL_LINK:
3847 cnt = hdev->acl_cnt;
3848 break;
3849 case SCO_LINK:
3850 case ESCO_LINK:
3851 cnt = hdev->sco_cnt;
3852 break;
3853 case LE_LINK:
3854 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3855 break;
3856 default:
3857 cnt = 0;
3858 BT_ERR("Unknown link type");
3859 }
3860
3861 q = cnt / num;
1da177e4 3862 *quote = q ? q : 1;
3863 } else
3864 *quote = 0;
3865
3866 BT_DBG("conn %p quote %d", conn, *quote);
3867 return conn;
3868}
3869
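/* Editorial worked example: with three ACL connections all queuing data
 * and hdev->acl_cnt == 8 free controller buffers, num ends up as 3, the
 * least busy connection (smallest c->sent) wins, and *quote becomes
 * 8 / 3 == 2 packets; a quotient of 0 is rounded up to 1 so the picked
 * connection always makes progress.
 */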
6039aa73 3870static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4 3871{
3872 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3873 struct hci_conn *c;
1da177e4 3874
bae1f5d9 3875 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 3876
bf4c6325 3877 rcu_read_lock();
3878
1da177e4 3879 /* Kill stalled connections */
bf4c6325 3880 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3881 if (c->type == type && c->sent) {
6ed93dc6 3882 BT_ERR("%s killing stalled connection %pMR",
3883 hdev->name, &c->dst);
bed71748 3884 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4 3885 }
3886 }
bf4c6325 3887
3888 rcu_read_unlock();
1da177e4 3889}
3890
6039aa73 3891static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3892 int *quote)
1da177e4 3893{
73d80deb 3894 struct hci_conn_hash *h = &hdev->conn_hash;
3895 struct hci_chan *chan = NULL;
abc5de8f 3896 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 3897 struct hci_conn *conn;
73d80deb 3898 int cnt, q, conn_num = 0;
3899
3900 BT_DBG("%s", hdev->name);
3901
bf4c6325 3902 rcu_read_lock();
3903
3904 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb 3905 struct hci_chan *tmp;
3906
3907 if (conn->type != type)
3908 continue;
3909
3910 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3911 continue;
3912
3913 conn_num++;
3914
8192edef 3915 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb 3916 struct sk_buff *skb;
3917
3918 if (skb_queue_empty(&tmp->data_q))
3919 continue;
3920
3921 skb = skb_peek(&tmp->data_q);
3922 if (skb->priority < cur_prio)
3923 continue;
3924
3925 if (skb->priority > cur_prio) {
3926 num = 0;
3927 min = ~0;
3928 cur_prio = skb->priority;
3929 }
3930
3931 num++;
3932
3933 if (conn->sent < min) {
3934 min = conn->sent;
3935 chan = tmp;
3936 }
3937 }
3938
3939 if (hci_conn_num(hdev, type) == conn_num)
3940 break;
3941 }
3942
bf4c6325 3943 rcu_read_unlock();
3944
73d80deb 3945 if (!chan)
3946 return NULL;
3947
3948 switch (chan->conn->type) {
3949 case ACL_LINK:
3950 cnt = hdev->acl_cnt;
3951 break;
bd1eb66b 3952 case AMP_LINK:
3953 cnt = hdev->block_cnt;
3954 break;
73d80deb 3955 case SCO_LINK:
3956 case ESCO_LINK:
3957 cnt = hdev->sco_cnt;
3958 break;
3959 case LE_LINK:
3960 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3961 break;
3962 default:
3963 cnt = 0;
3964 BT_ERR("Unknown link type");
3965 }
3966
3967 q = cnt / num;
3968 *quote = q ? q : 1;
3969 BT_DBG("chan %p quote %d", chan, *quote);
3970 return chan;
3971}
3972
02b20f0b 3973static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3974{
3975 struct hci_conn_hash *h = &hdev->conn_hash;
3976 struct hci_conn *conn;
3977 int num = 0;
3978
3979 BT_DBG("%s", hdev->name);
3980
bf4c6325 3981 rcu_read_lock();
3982
3983 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b 3984 struct hci_chan *chan;
3985
3986 if (conn->type != type)
3987 continue;
3988
3989 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3990 continue;
3991
3992 num++;
3993
8192edef 3994 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b 3995 struct sk_buff *skb;
3996
3997 if (chan->sent) {
3998 chan->sent = 0;
3999 continue;
4000 }
4001
4002 if (skb_queue_empty(&chan->data_q))
4003 continue;
4004
4005 skb = skb_peek(&chan->data_q);
4006 if (skb->priority >= HCI_PRIO_MAX - 1)
4007 continue;
4008
4009 skb->priority = HCI_PRIO_MAX - 1;
4010
4011 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4012 skb->priority);
02b20f0b 4013 }
4014
4015 if (hci_conn_num(hdev, type) == num)
4016 break;
4017 }
bf4c6325 4018
4019 rcu_read_unlock();
4020
02b20f0b 4021}
4022
b71d385a 4023static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4024{
4025 /* Calculate count of blocks used by this packet */
4026 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4027}
4028
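/* Editorial worked example: under block-based flow control with
 * hdev->block_len == 339 bytes, an ACL frame carrying 672 bytes of
 * payload costs DIV_ROUND_UP(672, 339) == 2 controller blocks.
 */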
6039aa73 4029static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4030{
d7a5a11d 4031 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1da177e4 4032 /* ACL tx timeout must be longer than maximum
4033 * link supervision timeout (40.9 seconds) */
63d2bc1b 4034 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4035 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4036 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4037 }
63d2bc1b 4038}
1da177e4 4039
6039aa73 4040static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b 4041{
4042 unsigned int cnt = hdev->acl_cnt;
4043 struct hci_chan *chan;
4044 struct sk_buff *skb;
4045 int quote;
4046
4047 __check_timeout(hdev, cnt);
04837f64 4048
73d80deb 4049 while (hdev->acl_cnt &&
a8c5fb1a 4050 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24 4051 u32 priority = (skb_peek(&chan->data_q))->priority;
4052 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4053 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4054 skb->len, skb->priority);
73d80deb 4055
ec1cce24 4056 /* Stop if priority has changed */
4057 if (skb->priority < priority)
4058 break;
4059
4060 skb = skb_dequeue(&chan->data_q);
4061
73d80deb 4062 hci_conn_enter_active_mode(chan->conn,
04124681 4063 bt_cb(skb)->force_active);
04837f64 4064
57d17d70 4065 hci_send_frame(hdev, skb);
1da177e4 4066 hdev->acl_last_tx = jiffies;
4067
4068 hdev->acl_cnt--;
73d80deb 4069 chan->sent++;
4070 chan->conn->sent++;
1da177e4 4071 }
4072 }
02b20f0b 4073
4074 if (cnt != hdev->acl_cnt)
4075 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4 4076}
4077
6039aa73 4078static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4079{
63d2bc1b 4080 unsigned int cnt = hdev->block_cnt;
b71d385a 4081 struct hci_chan *chan;
4082 struct sk_buff *skb;
4083 int quote;
bd1eb66b 4084 u8 type;
b71d385a 4085
63d2bc1b 4086 __check_timeout(hdev, cnt);
b71d385a 4087
bd1eb66b 4088 BT_DBG("%s", hdev->name);
4089
4090 if (hdev->dev_type == HCI_AMP)
4091 type = AMP_LINK;
4092 else
4093 type = ACL_LINK;
4094
b71d385a 4095 while (hdev->block_cnt > 0 &&
bd1eb66b 4096 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a 4097 u32 priority = (skb_peek(&chan->data_q))->priority;
4098 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4099 int blocks;
4100
4101 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4102 skb->len, skb->priority);
b71d385a 4103
4104 /* Stop if priority has changed */
4105 if (skb->priority < priority)
4106 break;
4107
4108 skb = skb_dequeue(&chan->data_q);
4109
4110 blocks = __get_blocks(hdev, skb);
4111 if (blocks > hdev->block_cnt)
4112 return;
4113
4114 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4115 bt_cb(skb)->force_active);
b71d385a 4116
57d17d70 4117 hci_send_frame(hdev, skb);
b71d385a 4118 hdev->acl_last_tx = jiffies;
4119
4120 hdev->block_cnt -= blocks;
4121 quote -= blocks;
4122
4123 chan->sent += blocks;
4124 chan->conn->sent += blocks;
4125 }
4126 }
4127
4128 if (cnt != hdev->block_cnt)
bd1eb66b 4129 hci_prio_recalculate(hdev, type);
b71d385a 4130}
4131
6039aa73 4132static void hci_sched_acl(struct hci_dev *hdev)
b71d385a 4133{
4134 BT_DBG("%s", hdev->name);
4135
bd1eb66b 4136 /* No ACL link over BR/EDR controller */
4137 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4138 return;
4139
4140 /* No AMP link over AMP controller */
4141 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4142 return;
4143
4144 switch (hdev->flow_ctl_mode) {
4145 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4146 hci_sched_acl_pkt(hdev);
4147 break;
4148
4149 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4150 hci_sched_acl_blk(hdev);
4151 break;
4152 }
4153}
4154
1da177e4 4155/* Schedule SCO */
6039aa73 4156static void hci_sched_sco(struct hci_dev *hdev)
1da177e4 4157{
4158 struct hci_conn *conn;
4159 struct sk_buff *skb;
4160 int quote;
4161
4162 BT_DBG("%s", hdev->name);
4163
52087a79 4164 if (!hci_conn_num(hdev, SCO_LINK))
4165 return;
4166
1da177e4 4167 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4168 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4169 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4170 hci_send_frame(hdev, skb);
1da177e4 4171
4172 conn->sent++;
4173 if (conn->sent == ~0)
4174 conn->sent = 0;
4175 }
4176 }
4177}
4178
6039aa73 4179static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82 4180{
4181 struct hci_conn *conn;
4182 struct sk_buff *skb;
4183 int quote;
4184
4185 BT_DBG("%s", hdev->name);
4186
52087a79 4187 if (!hci_conn_num(hdev, ESCO_LINK))
4188 return;
4189
8fc9ced3 4190 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4191 &quote))) {
b6a0dc82 4192 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4193 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4194 hci_send_frame(hdev, skb);
b6a0dc82 4195
4196 conn->sent++;
4197 if (conn->sent == ~0)
4198 conn->sent = 0;
4199 }
4200 }
4201}
4202
6039aa73 4203static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4204{
73d80deb 4205 struct hci_chan *chan;
6ed58ec5 4206 struct sk_buff *skb;
02b20f0b 4207 int quote, cnt, tmp;
6ed58ec5 4208
4209 BT_DBG("%s", hdev->name);
4210
52087a79 4211 if (!hci_conn_num(hdev, LE_LINK))
4212 return;
4213
d7a5a11d 4214 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6ed58ec5 4215 /* LE tx timeout must be longer than maximum
4216 * link supervision timeout (40.9 seconds) */
bae1f5d9 4217 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4218 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4219 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5 4220 }
4221
4222 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4223 tmp = cnt;
73d80deb 4224 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24 4225 u32 priority = (skb_peek(&chan->data_q))->priority;
4226 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4227 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4228 skb->len, skb->priority);
6ed58ec5 4229
ec1cce24 4230 /* Stop if priority has changed */
4231 if (skb->priority < priority)
4232 break;
4233
4234 skb = skb_dequeue(&chan->data_q);
4235
57d17d70 4236 hci_send_frame(hdev, skb);
6ed58ec5 4237 hdev->le_last_tx = jiffies;
4238
4239 cnt--;
73d80deb 4240 chan->sent++;
4241 chan->conn->sent++;
6ed58ec5 4242 }
4243 }
73d80deb 4244
6ed58ec5 4245 if (hdev->le_pkts)
4246 hdev->le_cnt = cnt;
4247 else
4248 hdev->acl_cnt = cnt;
02b20f0b 4249
4250 if (cnt != tmp)
4251 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5 4252}
4253
3eff45ea 4254static void hci_tx_work(struct work_struct *work)
1da177e4 4255{
3eff45ea 4256 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4 4257 struct sk_buff *skb;
4258
6ed58ec5 4259 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4260 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4261
d7a5a11d 4262 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
52de599e
MH
4263 /* Schedule queues and send stuff to HCI driver */
4264 hci_sched_acl(hdev);
4265 hci_sched_sco(hdev);
4266 hci_sched_esco(hdev);
4267 hci_sched_le(hdev);
4268 }
6ed58ec5 4269
1da177e4 4270 /* Send next queued raw (unknown type) packet */
4271 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4272 hci_send_frame(hdev, skb);
1da177e4 4273}
4274
25985edc 4275/* ----- HCI RX task (incoming data processing) ----- */
1da177e4 4276
4277/* ACL data packet */
6039aa73 4278static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4279{
4280 struct hci_acl_hdr *hdr = (void *) skb->data;
4281 struct hci_conn *conn;
4282 __u16 handle, flags;
4283
4284 skb_pull(skb, HCI_ACL_HDR_SIZE);
4285
4286 handle = __le16_to_cpu(hdr->handle);
4287 flags = hci_flags(handle);
4288 handle = hci_handle(handle);
4289
f0e09510 4290 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4291 handle, flags);
1da177e4 4292
4293 hdev->stat.acl_rx++;
4294
4295 hci_dev_lock(hdev);
4296 conn = hci_conn_hash_lookup_handle(hdev, handle);
4297 hci_dev_unlock(hdev);
8e87d142 4298
1da177e4 4299 if (conn) {
65983fc7 4300 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4301
1da177e4 4302 /* Send to upper protocol */
686ebf28 4303 l2cap_recv_acldata(conn, skb, flags);
4304 return;
1da177e4 4305 } else {
8e87d142 4306 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4307 hdev->name, handle);
1da177e4 4308 }
4309
4310 kfree_skb(skb);
4311}
4312
4313/* SCO data packet */
6039aa73 4314static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4315{
4316 struct hci_sco_hdr *hdr = (void *) skb->data;
4317 struct hci_conn *conn;
4318 __u16 handle;
4319
4320 skb_pull(skb, HCI_SCO_HDR_SIZE);
4321
4322 handle = __le16_to_cpu(hdr->handle);
4323
f0e09510 4324 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4 4325
4326 hdev->stat.sco_rx++;
4327
4328 hci_dev_lock(hdev);
4329 conn = hci_conn_hash_lookup_handle(hdev, handle);
4330 hci_dev_unlock(hdev);
4331
4332 if (conn) {
1da177e4 4333 /* Send to upper protocol */
686ebf28 4334 sco_recv_scodata(conn, skb);
4335 return;
1da177e4 4336 } else {
8e87d142 4337 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 4338 hdev->name, handle);
1da177e4 4339 }
4340
4341 kfree_skb(skb);
4342}
4343
9238f36a 4344static bool hci_req_is_complete(struct hci_dev *hdev)
4345{
4346 struct sk_buff *skb;
4347
4348 skb = skb_peek(&hdev->cmd_q);
4349 if (!skb)
4350 return true;
4351
242c0ebd 4352 return bt_cb(skb)->hci.req_start;
9238f36a 4353}
4354
42c6b129 4355static void hci_resend_last(struct hci_dev *hdev)
4356{
4357 struct hci_command_hdr *sent;
4358 struct sk_buff *skb;
4359 u16 opcode;
4360
4361 if (!hdev->sent_cmd)
4362 return;
4363
4364 sent = (void *) hdev->sent_cmd->data;
4365 opcode = __le16_to_cpu(sent->opcode);
4366 if (opcode == HCI_OP_RESET)
4367 return;
4368
4369 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4370 if (!skb)
4371 return;
4372
4373 skb_queue_head(&hdev->cmd_q, skb);
4374 queue_work(hdev->workqueue, &hdev->cmd_work);
4375}
4376
e6214487 4377void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4378 hci_req_complete_t *req_complete,
4379 hci_req_complete_skb_t *req_complete_skb)
9238f36a 4380{
9238f36a 4381 struct sk_buff *skb;
4382 unsigned long flags;
4383
4384 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4385
42c6b129 4386 /* If the completed command doesn't match the last one that was
4387 * sent we need to do special handling of it.
9238f36a 4388 */
42c6b129 4389 if (!hci_sent_cmd_data(hdev, opcode)) {
4390 /* Some CSR based controllers generate a spontaneous
4391 * reset complete event during init and any pending
4392 * command will never be completed. In such a case we
4393 * need to resend whatever was the last sent
4394 * command.
4395 */
4396 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4397 hci_resend_last(hdev);
4398
9238f36a 4399 return;
42c6b129 4400 }
9238f36a 4401
4402 /* If the command succeeded and there's still more commands in
4403 * this request the request is not yet complete.
4404 */
4405 if (!status && !hci_req_is_complete(hdev))
4406 return;
4407
4408 /* If this was the last command in a request the complete
4409 * callback would be found in hdev->sent_cmd instead of the
4410 * command queue (hdev->cmd_q).
4411 */
242c0ebd 4412 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4413 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
e6214487 4414 return;
4415 }
53e21fbc 4416
242c0ebd 4417 if (bt_cb(hdev->sent_cmd)->hci.req_complete_skb) {
4418 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
e6214487 4419 return;
9238f36a 4420 }
4421
4422 /* Remove all pending commands belonging to this request */
4423 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4424 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
242c0ebd 4425 if (bt_cb(skb)->hci.req_start) {
9238f36a 4426 __skb_queue_head(&hdev->cmd_q, skb);
4427 break;
4428 }
4429
242c0ebd 4430 *req_complete = bt_cb(skb)->hci.req_complete;
4431 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
9238f36a 4432 kfree_skb(skb);
4433 }
4434 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
9238f36a 4435}
4436
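/* Editorial note on the logic above: a request is a chain of queued
 * commands with hci.req_start set only on the first one. When a command
 * completes, the chain is finished either on failure or when no further
 * chained command is queued; the matching req_complete/req_complete_skb
 * callback is taken from hdev->sent_cmd for the final command, and any
 * commands still queued for an aborted request are flushed here.
 */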
b78752cc 4437static void hci_rx_work(struct work_struct *work)
1da177e4 4438{
b78752cc 4439 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4 4440 struct sk_buff *skb;
4441
4442 BT_DBG("%s", hdev->name);
4443
1da177e4 4444 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c 4445 /* Send copy to monitor */
4446 hci_send_to_monitor(hdev, skb);
4447
1da177e4 4448 if (atomic_read(&hdev->promisc)) {
4449 /* Send copy to the sockets */
470fe1b5 4450 hci_send_to_sock(hdev, skb);
1da177e4 4451 }
4452
d7a5a11d 4453 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1da177e4 4454 kfree_skb(skb);
4455 continue;
4456 }
4457
4458 if (test_bit(HCI_INIT, &hdev->flags)) {
4459 /* Don't process data packets in this state. */
0d48d939 4460 switch (bt_cb(skb)->pkt_type) {
1da177e4 4461 case HCI_ACLDATA_PKT:
4462 case HCI_SCODATA_PKT:
4463 kfree_skb(skb);
4464 continue;
3ff50b79 4465 }
1da177e4 4466 }
4467
4468 /* Process frame */
0d48d939 4469 switch (bt_cb(skb)->pkt_type) {
1da177e4 4470 case HCI_EVENT_PKT:
b78752cc 4471 BT_DBG("%s Event packet", hdev->name);
1da177e4 4472 hci_event_packet(hdev, skb);
4473 break;
4474
4475 case HCI_ACLDATA_PKT:
4476 BT_DBG("%s ACL data packet", hdev->name);
4477 hci_acldata_packet(hdev, skb);
4478 break;
4479
4480 case HCI_SCODATA_PKT:
4481 BT_DBG("%s SCO data packet", hdev->name);
4482 hci_scodata_packet(hdev, skb);
4483 break;
4484
4485 default:
4486 kfree_skb(skb);
4487 break;
4488 }
4489 }
1da177e4 4490}
4491
c347b765 4492static void hci_cmd_work(struct work_struct *work)
1da177e4 4493{
c347b765 4494 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4 4495 struct sk_buff *skb;
4496
2104786b 4497 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4498 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4499
1da177e4 4500 /* Send queued commands */
5a08ecce 4501 if (atomic_read(&hdev->cmd_cnt)) {
4502 skb = skb_dequeue(&hdev->cmd_q);
4503 if (!skb)
4504 return;
4505
7585b97a 4506 kfree_skb(hdev->sent_cmd);
1da177e4 4507
a675d7f1 4508 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 4509 if (hdev->sent_cmd) {
1da177e4 4510 atomic_dec(&hdev->cmd_cnt);
57d17d70 4511 hci_send_frame(hdev, skb);
7bdb8a5c 4512 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 4513 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 4514 else
65cc2b49 4515 schedule_delayed_work(&hdev->cmd_timer,
4516 HCI_CMD_TIMEOUT);
1da177e4 4517 } else {
4518 skb_queue_head(&hdev->cmd_q, skb);
c347b765 4519 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4 4520 }
4521 }
4522}