Bluetooth: Limit userspace exposure of stack internal events
net/bluetooth/hci_core.c

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

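/* Usage sketch (editor's illustration, not part of the upstream file):
 * __hci_init() below creates this entry as "dut_mode" under
 * hdev->debugfs, which for Bluetooth controllers normally resolves to
 * /sys/kernel/debug/bluetooth/hciX (assuming debugfs is mounted at
 * /sys/kernel/debug). Device Under Test mode could then be toggled
 * from a shell:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing 'Y' issues HCI_OP_ENABLE_DUT_MODE and writing 'N' issues
 * HCI_OP_RESET, exactly as dut_mode_write() above implements.
 */
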
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

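/* Usage sketch (editor's illustration, not part of the upstream file):
 * a caller that holds hci_req_lock() and has the device up can issue
 * a command synchronously and must free the returned skb, e.g.:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 *
 * dut_mode_write() above follows exactly this pattern.
 */
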
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

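/* Usage sketch (editor's illustration, not part of the upstream file):
 * callers package one or more HCI commands into a request callback of
 * the form used below (hci_reset_req, hci_scan_req, ...) and run it
 * synchronously, as the ioctl handlers at the end of this file do:
 *
 *	err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
 *			   HCI_INIT_TIMEOUT);
 */
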
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;

		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

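/* Usage sketch (editor's illustration, not part of the upstream file):
 * every successful hci_dev_get() must be balanced by hci_dev_put(),
 * which is the pattern all the ioctl helpers in this file follow:
 *
 *	hdev = hci_dev_get(dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */
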
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

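/* Usage sketch (editor's illustration, not part of the upstream file):
 * the inquiry result handlers in hci_event.c feed each response into
 * this cache and pass the returned MGMT_DEV_FOUND_* flags on to the
 * management interface, roughly:
 *
 *	flags = hci_inquiry_cache_update(hdev, &data, name_known);
 *	(the flags then travel onward via mgmt_device_found())
 */
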
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

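/* Usage sketch (editor's illustration; the ioctl dispatch itself lives
 * in hci_sock.c, not in this file): user space reaches hci_inquiry()
 * through the HCIINQUIRY ioctl on a raw HCI socket, supplying a buffer
 * large enough for the request header plus the returned inquiry_info
 * entries, e.g.:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 0,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };	(GIAC)
 *
 *	ioctl(sk, HCIINQUIRY, &buf);
 */
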
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

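/* Usage sketch (editor's illustration; the dispatch itself lives in
 * hci_sock.c): legacy user space powers a controller on by calling
 * the HCIDEVUP ioctl on a raw HCI socket, which ends up here:
 *
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	ioctl(sk, HCIDEVUP, 0);		(hci0 -> hci_dev_open(0))
 */
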
/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
	    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

123abc08
JH
1788static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1789{
bc6d2d04 1790 bool conn_changed, discov_changed;
123abc08
JH
1791
1792 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1793
1794 if ((scan & SCAN_PAGE))
238be788
MH
1795 conn_changed = !hci_dev_test_and_set_flag(hdev,
1796 HCI_CONNECTABLE);
123abc08 1797 else
a69d8927
MH
1798 conn_changed = hci_dev_test_and_clear_flag(hdev,
1799 HCI_CONNECTABLE);
123abc08 1800
bc6d2d04 1801 if ((scan & SCAN_INQUIRY)) {
238be788
MH
1802 discov_changed = !hci_dev_test_and_set_flag(hdev,
1803 HCI_DISCOVERABLE);
bc6d2d04 1804 } else {
a358dc11 1805 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
a69d8927
MH
1806 discov_changed = hci_dev_test_and_clear_flag(hdev,
1807 HCI_DISCOVERABLE);
bc6d2d04
JH
1808 }
1809
d7a5a11d 1810 if (!hci_dev_test_flag(hdev, HCI_MGMT))
123abc08
JH
1811 return;
1812
bc6d2d04
JH
1813 if (conn_changed || discov_changed) {
1814 /* In case this was disabled through mgmt */
a1536da2 1815 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
bc6d2d04 1816
d7a5a11d 1817 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
bc6d2d04
JH
1818 mgmt_update_adv_data(hdev);
1819
123abc08 1820 mgmt_new_settings(hdev);
bc6d2d04 1821 }
123abc08
JH
1822}
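
/* The scan parameter mirrors the HCI Write Scan Enable bit mask from
 * the Bluetooth specification: SCAN_INQUIRY is bit 0 and SCAN_PAGE is
 * bit 1. For example, 0x03 (both scans) marks the controller
 * connectable and discoverable, 0x02 (page scan only) marks it just
 * connectable, and 0x00 clears both flags.
 */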

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
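
/* For HCISETACLMTU and HCISETSCOMTU, dev_opt packs two 16-bit values
 * into one 32-bit word, which the pointer arithmetic above unpacks:
 * the low half-word carries the packet count and the high half-word
 * the MTU. A sketch of the caller-side encoding (illustrative only,
 * and valid as written on little-endian hosts, matching the
 * byte-order assumption the unpacking above already makes):
 *
 *	dr.dev_opt = ((__u32) acl_mtu << 16) | acl_pkts;
 */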

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
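
/* Userspace reaches hci_get_dev_list() through the HCIGETDEVLIST
 * ioctl on a raw HCI socket. A minimal caller-side sketch
 * (illustrative only; error handling omitted):
 *
 *	struct hci_dev_list_req *dl;
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	dl = malloc(sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	dl->dev_num = 16;		// capacity in, actual count out
 *	ioctl(sk, HCIGETDEVLIST, dl);	// fills dev_req[0..dev_num)
 */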

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* Once the controller is configured, it is important
		 * to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

static void hci_adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, adv_instance_expire.work);

	BT_DBG("%s", hdev->name);

	mgmt_adv_timeout_expired(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
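
/* The resulting data->present value acts as a bit mask of which OOB
 * data sets are usable: 0x01 for P-192 only, 0x02 for P-256 only,
 * 0x03 for both, and 0x00 when neither hash/randomizer pair was
 * provided.
 */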

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing instance %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
}

/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
	else
		adv_instance->duration = duration;

	BT_DBG("%s for instance %d", hdev->name, instance);

	return 0;
}
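
/* A minimal usage sketch for the instance API above (illustrative
 * only; the hdev pointer and lock context are assumed, and the
 * timeout/duration values are in the units consumed by the mgmt
 * layer, assumed to be seconds here):
 *
 *	u8 ad[3] = { 0x02, 0x01, 0x06 };   // AD: flags, LE general disc.
 *
 *	hci_dev_lock(hdev);
 *	hci_add_adv_instance(hdev, 1, 0, sizeof(ad), ad, 0, NULL, 0, 10);
 *	...
 *	hci_remove_adv_instance(hdev, 1);
 *	hci_dev_unlock(hdev);
 */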

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
						    bdaddr_t *addr,
						    u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, &hdev->pend_le_conns, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type &&
		    param->explicit_connect)
			return param;
	}

	list_for_each_entry(param, &hdev->pend_le_reports, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type &&
		    param->explicit_connect)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish a one time connection to a
		 * disabled device, leave the params, but mark them as
		 * just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running an LE only scan, change the
			 * discovery state. If we were running both LE and
			 * BR/EDR inquiry simultaneously, and BR/EDR inquiry
			 * is already finished, stop discovery; otherwise
			 * BR/EDR inquiry will stop discovery when finished.
			 * If a remote device name is still being resolved,
			 * do not change the discovery state.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
			    hdev->discovery.state != DISCOVERY_RESOLVING)
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	cancel_delayed_work_sync(&hdev->le_scan_restart);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	unsigned long timeout, duration, scan_start, now;

	BT_DBG("%s", hdev->name);

	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		return;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}
	queue_delayed_work(hdev->workqueue,
			   &hdev->le_scan_disable, timeout);
}
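
/* Worked example of the timeout math above: with a scan duration of
 * 10240 jiffies and a restart happening 4000 jiffies after
 * scan_start, elapsed is 4000 and the disable work is re-queued for
 * the remaining 6240 jiffies. The ULONG_MAX branch only matters when
 * jiffies wrapped between scan_start and now, in which case elapsed
 * is the wrapped distance instead of a huge bogus value.
 */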

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_restart_work_complete);
	if (err)
		BT_ERR("Restart LE scan request failed: err %d", err);
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
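
/* Example: an LE-only controller with an all-zero public BD_ADDR and
 * a configured static address reports the static random address as
 * its identity, while a dual-mode controller with BR/EDR enabled
 * keeps reporting its public BD_ADDR.
 */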

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
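
/* Driver-side registration follows the usual alloc/fill/register
 * pattern. A minimal sketch (illustrative only; my_open, my_close and
 * my_send are hypothetical driver callbacks):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */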

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
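
/* The three injected bytes above form a complete HCI event packet:
 * event code HCI_EV_HARDWARE_ERROR (0x10), a parameter length of
 * 0x01 and a hardware error code of 0x00. Feeding it through
 * hci_recv_frame() makes the synthetic error take exactly the same
 * path as one reported by the controller itself.
 */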

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
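
/* A minimal usage sketch (illustrative only; the opcode is the
 * standard Read Local Version Information command):
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_INIT_TIMEOUT);
 *	if (!IS_ERR(skb)) {
 *		// skb->data holds the command complete parameters
 *		kfree_skb(skb);
 *	}
 */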
3601
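/* Example (illustrative, not part of the original file): a synchronous
 * read of the controller's version information. The returned skb holds
 * the Command Complete parameters and must be freed by the caller.
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 */
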
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

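/* Worked example (illustrative, not part of the original file):
 * hci_handle_pack() folds the 12-bit connection handle and the 4 packet
 * boundary/broadcast flag bits into one 16-bit field, so handle 0x002a
 * with flags ACL_START (0x2) becomes 0x202a on the wire; hci_flags() and
 * hci_handle() reverse the split on the receive path.
 */
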
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because for 6LoWPAN links this function can be called
		 * from softirq context, where taking a normal spin lock could
		 * deadlock.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

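/* Worked example (illustrative, not part of the original file): with
 * hdev->acl_cnt == 8 free controller buffers shared by num == 3 busy ACL
 * connections, q = 8 / 3 = 2, so the least-used connection may send two
 * packets this round; a zero quotient is rounded up to 1 so a connection
 * with queued data is never starved outright.
 */
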
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

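/* Illustrative walk-through (not part of the original file): if channel
 * A's head skb carries priority 5 and channel B's carries priority 6,
 * the scan resets num and min when it reaches B, so only channels at the
 * highest observed priority compete, and among those the one on the
 * connection with the fewest unacked packets wins.
 */
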
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

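/* Illustrative effect (not part of the original file): a channel that
 * sent nothing in the last scheduling round gets its queued head skb
 * promoted to HCI_PRIO_MAX - 1, so low-priority traffic cannot be
 * starved indefinitely by a busy high-priority channel.
 */
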
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

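/* Worked example (illustrative, not part of the original file): a
 * 673-byte ACL skb on a controller reporting block_len == 64 occupies
 * DIV_ROUND_UP(673 - 4, 64) = 11 data blocks, since the 4-byte ACL
 * header is not counted against the block budget.
 */
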
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

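/* Note (illustrative, not part of the original file): controllers that
 * report no dedicated LE buffer pool (le_pkts == 0) share the BR/EDR ACL
 * pool, which is why the remaining budget is written back to acl_cnt in
 * that case.
 */
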
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->req.complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		*req_complete = bt_cb(skb)->req.complete;
		*req_complete_skb = bt_cb(skb)->req.complete_skb;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

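/* Illustrative queue layout (not part of the original file): with a
 * three-command request pending as [A(start) B C][D(start) ...] and A in
 * flight, a failure of A walks the queue here, frees B and C, and stops
 * at D's req.start marker so the next request stays intact.
 */
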
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
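
/* Note (illustrative, not part of the original file): cmd_cnt mirrors the
 * controller's Num_HCI_Command_Packets credit; it is decremented here for
 * every command handed to the driver and replenished by the event handler
 * when a Command Complete or Command Status event arrives.
 */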