Bluetooth: Enable LE PHY Update Complete event
[linux-2.6-block.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
99780a7b 32#include <linux/crypto.h>
47219839 33#include <asm/unaligned.h>
1da177e4
LT
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
4bc58f51 37#include <net/bluetooth/l2cap.h>
af58925c 38#include <net/bluetooth/mgmt.h>
1da177e4 39
0857dd3b 40#include "hci_request.h"
60c5f5fb 41#include "hci_debugfs.h"
970c4e46 42#include "smp.h"
6d5d2ee6 43#include "leds.h"
970c4e46 44
b78752cc 45static void hci_rx_work(struct work_struct *work);
c347b765 46static void hci_cmd_work(struct work_struct *work);
3eff45ea 47static void hci_tx_work(struct work_struct *work);
1da177e4 48
1da177e4
LT
49/* HCI device list */
50LIST_HEAD(hci_dev_list);
51DEFINE_RWLOCK(hci_dev_list_lock);
52
53/* HCI callback list */
54LIST_HEAD(hci_cb_list);
fba7ecf0 55DEFINE_MUTEX(hci_cb_list_lock);
1da177e4 56
3df92b31
SL
57/* HCI ID Numbering */
58static DEFINE_IDA(hci_index_ida);
59
baf27f6e
MH
60/* ---- HCI debugfs entries ---- */
61
4b4148e9
MH
62static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
63 size_t count, loff_t *ppos)
64{
65 struct hci_dev *hdev = file->private_data;
66 char buf[3];
67
74b93e9f 68 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
4b4148e9
MH
69 buf[1] = '\n';
70 buf[2] = '\0';
71 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
72}
73
/* debugfs write handler that toggles Device Under Test mode.
 *
 * Accepts a boolean string ("1"/"0", "y"/"n", ...). Enabling sends
 * HCI_OP_ENABLE_DUT_MODE; disabling resets the controller. The
 * HCI_DUT_MODE flag is only flipped after the command succeeds.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	/* DUT mode can only be changed while the device is up */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Nothing to do if the requested state is already set */
	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		/* Leaving DUT mode requires a controller reset */
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* The command response payload is not needed, only success */
	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}
114
/* File operations for the "dut_mode" debugfs entry */
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
121
4b4113d6
MH
122static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos)
124{
125 struct hci_dev *hdev = file->private_data;
126 char buf[3];
127
74b93e9f 128 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
4b4113d6
MH
129 buf[1] = '\n';
130 buf[2] = '\0';
131 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
132}
133
/* debugfs write handler that toggles vendor diagnostic mode.
 *
 * Parses a boolean string and, when the transport is active (or the
 * diagnostic setting is persistent), calls the driver's set_diag()
 * callback before updating the HCI_VENDOR_DIAG flag.
 */
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	/* Record the new state so it can be re-applied on power on */
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}
175
/* File operations for the "vendor_diag" debugfs entry */
static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};
182
f640ee98
MH
/* Create the basic debugfs entries that exist for every controller:
 * "dut_mode" always, and "vendor_diag" only when the driver provides
 * a set_diag() callback.
 */
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}
192
a1d01db1 193static int hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 194{
42c6b129 195 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
196
197 /* Reset device */
42c6b129
JH
198 set_bit(HCI_RESET, &req->hdev->flags);
199 hci_req_add(req, HCI_OP_RESET, 0, NULL);
a1d01db1 200 return 0;
1da177e4
LT
201}
202
42c6b129 203static void bredr_init(struct hci_request *req)
1da177e4 204{
42c6b129 205 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 206
1da177e4 207 /* Read Local Supported Features */
42c6b129 208 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 209
1143e5a6 210 /* Read Local Version */
42c6b129 211 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
212
213 /* Read BD Address */
42c6b129 214 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
215}
216
/* First-stage init for AMP controllers: switch to block-based flow
 * control and read the basic controller information.
 */
static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
239
a1d01db1 240static int amp_init2(struct hci_request *req)
0af801b9
JH
241{
242 /* Read Local Supported Features. Not all AMP controllers
243 * support this so it's placed conditionally in the second
244 * stage init.
245 */
246 if (req->hdev->commands[14] & 0x20)
247 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
a1d01db1
JH
248
249 return 0;
0af801b9
JH
250}
251
a1d01db1 252static int hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 253{
42c6b129 254 struct hci_dev *hdev = req->hdev;
e61ef499
AE
255
256 BT_DBG("%s %ld", hdev->name, opt);
257
11778716
AE
258 /* Reset */
259 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 260 hci_reset_req(req, 0);
11778716 261
e61ef499 262 switch (hdev->dev_type) {
ca8bee5d 263 case HCI_PRIMARY:
42c6b129 264 bredr_init(req);
e61ef499 265 break;
e61ef499 266 case HCI_AMP:
0af801b9 267 amp_init1(req);
e61ef499 268 break;
e61ef499
AE
269 default:
270 BT_ERR("Unknown device type %d", hdev->dev_type);
271 break;
272 }
a1d01db1
JH
273
274 return 0;
e61ef499
AE
275}
276
/* Stage-two setup for BR/EDR controllers: read basic parameters,
 * clear event filters and program the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
308
/* Stage-two setup for LE capable controllers. */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
326
/* Program the page 1 HCI event mask so that only events matching the
 * controller's capabilities are delivered to the host.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version
					    * Information Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh
					    * Complete
					    */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
422
/* Build the second-stage init request: per-transport setup (BR/EDR
 * and/or LE), local command discovery, SSP/EIR and inquiry mode setup.
 */
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* AMP controllers only get the reduced second stage */
	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* Clear any stale EIR data when SSP is off */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}
506
42c6b129 507static void hci_setup_link_policy(struct hci_request *req)
2177bab5 508{
42c6b129 509 struct hci_dev *hdev = req->hdev;
2177bab5
JH
510 struct hci_cp_write_def_link_policy cp;
511 u16 link_policy = 0;
512
513 if (lmp_rswitch_capable(hdev))
514 link_policy |= HCI_LP_RSWITCH;
515 if (lmp_hold_capable(hdev))
516 link_policy |= HCI_LP_HOLD;
517 if (lmp_sniff_capable(hdev))
518 link_policy |= HCI_LP_SNIFF;
519 if (lmp_park_capable(hdev))
520 link_policy |= HCI_LP_PARK;
521
522 cp.policy = cpu_to_le16(link_policy);
42c6b129 523 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
524}
525
/* Write the LE Host Supported setting when it differs from the
 * controller's current host feature bits.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	/* Only send the command if the setting actually changes */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
546
d62e6d67
JH
/* Program the page 2 HCI event mask based on CSB and ping support. */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
578
/* Build the third-stage init request: event masks, link policy,
 * page scan parameters, the full LE event mask and LE controller
 * queries, and extended feature pages beyond page 1.
 */
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40; /* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the correspondig event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04; /* LE Direct Advertising
					    * Report
					    */

		/* If the controller supports Channel Selection Algorithm #2
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
			events[2] |= 0x08; /* LE Channel Selection
					    * Algorithm
					    */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02; /* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01; /* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04; /* LE Connection Update
					    * Complete
					    */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08; /* LE Read Remote Used
					    * Features Complete
					    */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80; /* LE Read Local P-256
					    * Public Key Complete
					    */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01; /* LE Generate DHKey Complete */

		/* If the controller supports the LE Set Default PHY or
		 * LE Set PHY commands, enable the corresponding event.
		 */
		if (hdev->commands[35] & (0x20 | 0x40))
			events[1] |= 0x08; /* LE PHY Update Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}
735
/* Build the fourth-stage init request: stored link key cleanup,
 * page 2 event mask, codec/MWS queries, secure connections and
 * LE data length defaults.
 */
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = hdev->le_max_tx_len;
		cp.tx_time = hdev->le_max_tx_time;
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	return 0;
}
799
2177bab5
JH
/* Run the full four-stage initialization sequence for a configured
 * controller. AMP controllers stop after stage two; debugfs entries
 * are only created while in setup or config phase.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
856
/* Minimal init request for unconfigured controllers: reset, read the
 * local version and, when the driver can set a BD address, read the
 * current one.
 */
static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}
876
/* Run the reduced init sequence for an unconfigured controller.
 * Raw devices are skipped entirely.
 */
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}
893
a1d01db1 894static int hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
895{
896 __u8 scan = opt;
897
42c6b129 898 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
899
900 /* Inquiry and Page scans */
42c6b129 901 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
a1d01db1 902 return 0;
1da177e4
LT
903}
904
a1d01db1 905static int hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
906{
907 __u8 auth = opt;
908
42c6b129 909 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
910
911 /* Authentication */
42c6b129 912 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
a1d01db1 913 return 0;
1da177e4
LT
914}
915
a1d01db1 916static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
917{
918 __u8 encrypt = opt;
919
42c6b129 920 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 921
e4e8e37c 922 /* Encryption */
42c6b129 923 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
a1d01db1 924 return 0;
1da177e4
LT
925}
926
a1d01db1 927static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
928{
929 __le16 policy = cpu_to_le16(opt);
930
42c6b129 931 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
932
933 /* Default link policy */
42c6b129 934 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
a1d01db1 935 return 0;
e4e8e37c
MH
936}
937
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	/* Walk the global device list under the read lock; take a
	 * reference on the match before dropping the lock.
	 */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
1da177e4
LT
959
960/* ---- Inquiry support ---- */
ff9ef578 961
30dc78e1
JH
962bool hci_discovery_active(struct hci_dev *hdev)
963{
964 struct discovery_state *discov = &hdev->discovery;
965
6fbe195d 966 switch (discov->state) {
343f935b 967 case DISCOVERY_FINDING:
6fbe195d 968 case DISCOVERY_RESOLVING:
30dc78e1
JH
969 return true;
970
6fbe195d
AG
971 default:
972 return false;
973 }
30dc78e1
JH
974}
975
ff9ef578
JH
976void hci_discovery_set_state(struct hci_dev *hdev, int state)
977{
bb3e0a33
JH
978 int old_state = hdev->discovery.state;
979
ff9ef578
JH
980 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
981
bb3e0a33 982 if (old_state == state)
ff9ef578
JH
983 return;
984
bb3e0a33
JH
985 hdev->discovery.state = state;
986
ff9ef578
JH
987 switch (state) {
988 case DISCOVERY_STOPPED:
c54c3860
AG
989 hci_update_background_scan(hdev);
990
bb3e0a33 991 if (old_state != DISCOVERY_STARTING)
7b99b659 992 mgmt_discovering(hdev, 0);
ff9ef578
JH
993 break;
994 case DISCOVERY_STARTING:
995 break;
343f935b 996 case DISCOVERY_FINDING:
ff9ef578
JH
997 mgmt_discovering(hdev, 1);
998 break;
30dc78e1
JH
999 case DISCOVERY_RESOLVING:
1000 break;
ff9ef578
JH
1001 case DISCOVERY_STOPPING:
1002 break;
1003 }
ff9ef578
JH
1004}
1005
1f9b9a5d 1006void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1007{
30883512 1008 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1009 struct inquiry_entry *p, *n;
1da177e4 1010
561aafbc
JH
1011 list_for_each_entry_safe(p, n, &cache->all, all) {
1012 list_del(&p->all);
b57c1a56 1013 kfree(p);
1da177e4 1014 }
561aafbc
JH
1015
1016 INIT_LIST_HEAD(&cache->unknown);
1017 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1018}
1019
a8c5fb1a
GP
1020struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1021 bdaddr_t *bdaddr)
1da177e4 1022{
30883512 1023 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1024 struct inquiry_entry *e;
1025
6ed93dc6 1026 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1027
561aafbc
JH
1028 list_for_each_entry(e, &cache->all, all) {
1029 if (!bacmp(&e->data.bdaddr, bdaddr))
1030 return e;
1031 }
1032
1033 return NULL;
1034}
1035
1036struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1037 bdaddr_t *bdaddr)
561aafbc 1038{
30883512 1039 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1040 struct inquiry_entry *e;
1041
6ed93dc6 1042 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1043
1044 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1045 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1046 return e;
1047 }
1048
1049 return NULL;
1da177e4
LT
1050}
1051
30dc78e1 1052struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1053 bdaddr_t *bdaddr,
1054 int state)
30dc78e1
JH
1055{
1056 struct discovery_state *cache = &hdev->discovery;
1057 struct inquiry_entry *e;
1058
6ed93dc6 1059 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1060
1061 list_for_each_entry(e, &cache->resolve, list) {
1062 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1063 return e;
1064 if (!bacmp(&e->data.bdaddr, bdaddr))
1065 return e;
1066 }
1067
1068 return NULL;
1069}
1070
a3d4e20a 1071void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 1072 struct inquiry_entry *ie)
a3d4e20a
JH
1073{
1074 struct discovery_state *cache = &hdev->discovery;
1075 struct list_head *pos = &cache->resolve;
1076 struct inquiry_entry *p;
1077
1078 list_del(&ie->list);
1079
1080 list_for_each_entry(p, &cache->resolve, list) {
1081 if (p->name_state != NAME_PENDING &&
a8c5fb1a 1082 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
1083 break;
1084 pos = &p->list;
1085 }
1086
1087 list_add(&ie->list, pos);
1088}
1089
af58925c
MH
1090u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1091 bool name_known)
1da177e4 1092{
30883512 1093 struct discovery_state *cache = &hdev->discovery;
70f23020 1094 struct inquiry_entry *ie;
af58925c 1095 u32 flags = 0;
1da177e4 1096
6ed93dc6 1097 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 1098
6928a924 1099 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
2b2fec4d 1100
af58925c
MH
1101 if (!data->ssp_mode)
1102 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 1103
70f23020 1104 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 1105 if (ie) {
af58925c
MH
1106 if (!ie->data.ssp_mode)
1107 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 1108
a3d4e20a 1109 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 1110 data->rssi != ie->data.rssi) {
a3d4e20a
JH
1111 ie->data.rssi = data->rssi;
1112 hci_inquiry_cache_update_resolve(hdev, ie);
1113 }
1114
561aafbc 1115 goto update;
a3d4e20a 1116 }
561aafbc
JH
1117
1118 /* Entry not in the cache. Add new one. */
27f70f3e 1119 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
af58925c
MH
1120 if (!ie) {
1121 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1122 goto done;
1123 }
561aafbc
JH
1124
1125 list_add(&ie->all, &cache->all);
1126
1127 if (name_known) {
1128 ie->name_state = NAME_KNOWN;
1129 } else {
1130 ie->name_state = NAME_NOT_KNOWN;
1131 list_add(&ie->list, &cache->unknown);
1132 }
70f23020 1133
561aafbc
JH
1134update:
1135 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 1136 ie->name_state != NAME_PENDING) {
561aafbc
JH
1137 ie->name_state = NAME_KNOWN;
1138 list_del(&ie->list);
1da177e4
LT
1139 }
1140
70f23020
AE
1141 memcpy(&ie->data, data, sizeof(*data));
1142 ie->timestamp = jiffies;
1da177e4 1143 cache->timestamp = jiffies;
3175405b
JH
1144
1145 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 1146 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 1147
af58925c
MH
1148done:
1149 return flags;
1da177e4
LT
1150}
1151
1152static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1153{
30883512 1154 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1155 struct inquiry_info *info = (struct inquiry_info *) buf;
1156 struct inquiry_entry *e;
1157 int copied = 0;
1158
561aafbc 1159 list_for_each_entry(e, &cache->all, all) {
1da177e4 1160 struct inquiry_data *data = &e->data;
b57c1a56
JH
1161
1162 if (copied >= num)
1163 break;
1164
1da177e4
LT
1165 bacpy(&info->bdaddr, &data->bdaddr);
1166 info->pscan_rep_mode = data->pscan_rep_mode;
1167 info->pscan_period_mode = data->pscan_period_mode;
1168 info->pscan_mode = data->pscan_mode;
1169 memcpy(info->dev_class, data->dev_class, 3);
1170 info->clock_offset = data->clock_offset;
b57c1a56 1171
1da177e4 1172 info++;
b57c1a56 1173 copied++;
1da177e4
LT
1174 }
1175
1176 BT_DBG("cache %p, copied %d", cache, copied);
1177 return copied;
1178}
1179
a1d01db1 1180static int hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1181{
1182 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 1183 struct hci_dev *hdev = req->hdev;
1da177e4
LT
1184 struct hci_cp_inquiry cp;
1185
1186 BT_DBG("%s", hdev->name);
1187
1188 if (test_bit(HCI_INQUIRY, &hdev->flags))
a1d01db1 1189 return 0;
1da177e4
LT
1190
1191 /* Start Inquiry */
1192 memcpy(&cp.lap, &ir->lap, 3);
1193 cp.length = ir->length;
1194 cp.num_rsp = ir->num_rsp;
42c6b129 1195 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
a1d01db1
JH
1196
1197 return 0;
1da177e4
LT
1198}
1199
1200int hci_inquiry(void __user *arg)
1201{
1202 __u8 __user *ptr = arg;
1203 struct hci_inquiry_req ir;
1204 struct hci_dev *hdev;
1205 int err = 0, do_inquiry = 0, max_rsp;
1206 long timeo;
1207 __u8 *buf;
1208
1209 if (copy_from_user(&ir, ptr, sizeof(ir)))
1210 return -EFAULT;
1211
5a08ecce
AE
1212 hdev = hci_dev_get(ir.dev_id);
1213 if (!hdev)
1da177e4
LT
1214 return -ENODEV;
1215
d7a5a11d 1216 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1217 err = -EBUSY;
1218 goto done;
1219 }
1220
d7a5a11d 1221 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1222 err = -EOPNOTSUPP;
1223 goto done;
1224 }
1225
ca8bee5d 1226 if (hdev->dev_type != HCI_PRIMARY) {
5b69bef5
MH
1227 err = -EOPNOTSUPP;
1228 goto done;
1229 }
1230
d7a5a11d 1231 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
1232 err = -EOPNOTSUPP;
1233 goto done;
1234 }
1235
09fd0de5 1236 hci_dev_lock(hdev);
8e87d142 1237 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1238 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1239 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1240 do_inquiry = 1;
1241 }
09fd0de5 1242 hci_dev_unlock(hdev);
1da177e4 1243
04837f64 1244 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1245
1246 if (do_inquiry) {
01178cd4 1247 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
4ebeee2d 1248 timeo, NULL);
70f23020
AE
1249 if (err < 0)
1250 goto done;
3e13fa1e
AG
1251
1252 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1253 * cleared). If it is interrupted by a signal, return -EINTR.
1254 */
74316201 1255 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
3e13fa1e
AG
1256 TASK_INTERRUPTIBLE))
1257 return -EINTR;
70f23020 1258 }
1da177e4 1259
8fc9ced3
GP
1260 /* for unlimited number of responses we will use buffer with
1261 * 255 entries
1262 */
1da177e4
LT
1263 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1264
1265 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1266 * copy it to the user space.
1267 */
01df8c31 1268 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1269 if (!buf) {
1da177e4
LT
1270 err = -ENOMEM;
1271 goto done;
1272 }
1273
09fd0de5 1274 hci_dev_lock(hdev);
1da177e4 1275 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1276 hci_dev_unlock(hdev);
1da177e4
LT
1277
1278 BT_DBG("num_rsp %d", ir.num_rsp);
1279
1280 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1281 ptr += sizeof(ir);
1282 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1283 ir.num_rsp))
1da177e4 1284 err = -EFAULT;
8e87d142 1285 } else
1da177e4
LT
1286 err = -EFAULT;
1287
1288 kfree(buf);
1289
1290done:
1291 hci_dev_put(hdev);
1292 return err;
1293}
1294
cbed0ca1 1295static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 1296{
1da177e4
LT
1297 int ret = 0;
1298
1da177e4
LT
1299 BT_DBG("%s %p", hdev->name, hdev);
1300
b504430c 1301 hci_req_sync_lock(hdev);
1da177e4 1302
d7a5a11d 1303 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
94324962
JH
1304 ret = -ENODEV;
1305 goto done;
1306 }
1307
d7a5a11d
MH
1308 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1309 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
a5c8f270
MH
1310 /* Check for rfkill but allow the HCI setup stage to
1311 * proceed (which in itself doesn't cause any RF activity).
1312 */
d7a5a11d 1313 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
a5c8f270
MH
1314 ret = -ERFKILL;
1315 goto done;
1316 }
1317
1318 /* Check for valid public address or a configured static
1319 * random adddress, but let the HCI setup proceed to
1320 * be able to determine if there is a public address
1321 * or not.
1322 *
c6beca0e
MH
1323 * In case of user channel usage, it is not important
1324 * if a public address or static random address is
1325 * available.
1326 *
a5c8f270
MH
1327 * This check is only valid for BR/EDR controllers
1328 * since AMP controllers do not have an address.
1329 */
d7a5a11d 1330 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
ca8bee5d 1331 hdev->dev_type == HCI_PRIMARY &&
a5c8f270
MH
1332 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1333 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1334 ret = -EADDRNOTAVAIL;
1335 goto done;
1336 }
611b30f7
MH
1337 }
1338
1da177e4
LT
1339 if (test_bit(HCI_UP, &hdev->flags)) {
1340 ret = -EALREADY;
1341 goto done;
1342 }
1343
1da177e4
LT
1344 if (hdev->open(hdev)) {
1345 ret = -EIO;
1346 goto done;
1347 }
1348
e9ca8bf1 1349 set_bit(HCI_RUNNING, &hdev->flags);
05fcd4c4 1350 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
4a3f95b7 1351
f41c70c4
MH
1352 atomic_set(&hdev->cmd_cnt, 1);
1353 set_bit(HCI_INIT, &hdev->flags);
1354
d7a5a11d 1355 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
e131d74a
MH
1356 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1357
af202f84
MH
1358 if (hdev->setup)
1359 ret = hdev->setup(hdev);
f41c70c4 1360
af202f84
MH
1361 /* The transport driver can set these quirks before
1362 * creating the HCI device or in its setup callback.
1363 *
1364 * In case any of them is set, the controller has to
1365 * start up as unconfigured.
1366 */
eb1904f4
MH
1367 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1368 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
a1536da2 1369 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
f41c70c4 1370
0ebca7d6
MH
1371 /* For an unconfigured controller it is required to
1372 * read at least the version information provided by
1373 * the Read Local Version Information command.
1374 *
1375 * If the set_bdaddr driver callback is provided, then
1376 * also the original Bluetooth public device address
1377 * will be read using the Read BD Address command.
1378 */
d7a5a11d 1379 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
0ebca7d6 1380 ret = __hci_unconf_init(hdev);
89bc22d2
MH
1381 }
1382
d7a5a11d 1383 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
9713c17b
MH
1384 /* If public address change is configured, ensure that
1385 * the address gets programmed. If the driver does not
1386 * support changing the public address, fail the power
1387 * on procedure.
1388 */
1389 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1390 hdev->set_bdaddr)
24c457e2
MH
1391 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1392 else
1393 ret = -EADDRNOTAVAIL;
1394 }
1395
f41c70c4 1396 if (!ret) {
d7a5a11d 1397 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
98a63aaf 1398 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
f41c70c4 1399 ret = __hci_init(hdev);
98a63aaf
MH
1400 if (!ret && hdev->post_init)
1401 ret = hdev->post_init(hdev);
1402 }
1da177e4
LT
1403 }
1404
7e995b9e
MH
1405 /* If the HCI Reset command is clearing all diagnostic settings,
1406 * then they need to be reprogrammed after the init procedure
1407 * completed.
1408 */
1409 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1410 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1411 ret = hdev->set_diag(hdev, true);
1412
f41c70c4
MH
1413 clear_bit(HCI_INIT, &hdev->flags);
1414
1da177e4
LT
1415 if (!ret) {
1416 hci_dev_hold(hdev);
a1536da2 1417 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1da177e4 1418 set_bit(HCI_UP, &hdev->flags);
05fcd4c4 1419 hci_sock_dev_event(hdev, HCI_DEV_UP);
6d5d2ee6 1420 hci_leds_update_powered(hdev, true);
d7a5a11d
MH
1421 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1422 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1423 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1424 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
2ff13894 1425 hci_dev_test_flag(hdev, HCI_MGMT) &&
ca8bee5d 1426 hdev->dev_type == HCI_PRIMARY) {
2ff13894
JH
1427 ret = __hci_req_hci_power_on(hdev);
1428 mgmt_power_on(hdev, ret);
56e5cb86 1429 }
8e87d142 1430 } else {
1da177e4 1431 /* Init failed, cleanup */
3eff45ea 1432 flush_work(&hdev->tx_work);
c347b765 1433 flush_work(&hdev->cmd_work);
b78752cc 1434 flush_work(&hdev->rx_work);
1da177e4
LT
1435
1436 skb_queue_purge(&hdev->cmd_q);
1437 skb_queue_purge(&hdev->rx_q);
1438
1439 if (hdev->flush)
1440 hdev->flush(hdev);
1441
1442 if (hdev->sent_cmd) {
1443 kfree_skb(hdev->sent_cmd);
1444 hdev->sent_cmd = NULL;
1445 }
1446
e9ca8bf1 1447 clear_bit(HCI_RUNNING, &hdev->flags);
05fcd4c4 1448 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
4a3f95b7 1449
1da177e4 1450 hdev->close(hdev);
fee746b0 1451 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
1452 }
1453
1454done:
b504430c 1455 hci_req_sync_unlock(hdev);
1da177e4
LT
1456 return ret;
1457}
1458
cbed0ca1
JH
1459/* ---- HCI ioctl helpers ---- */
1460
1461int hci_dev_open(__u16 dev)
1462{
1463 struct hci_dev *hdev;
1464 int err;
1465
1466 hdev = hci_dev_get(dev);
1467 if (!hdev)
1468 return -ENODEV;
1469
4a964404 1470 /* Devices that are marked as unconfigured can only be powered
fee746b0
MH
1471 * up as user channel. Trying to bring them up as normal devices
1472 * will result into a failure. Only user channel operation is
1473 * possible.
1474 *
1475 * When this function is called for a user channel, the flag
1476 * HCI_USER_CHANNEL will be set first before attempting to
1477 * open the device.
1478 */
d7a5a11d
MH
1479 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1480 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
fee746b0
MH
1481 err = -EOPNOTSUPP;
1482 goto done;
1483 }
1484
e1d08f40
JH
1485 /* We need to ensure that no other power on/off work is pending
1486 * before proceeding to call hci_dev_do_open. This is
1487 * particularly important if the setup procedure has not yet
1488 * completed.
1489 */
a69d8927 1490 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
e1d08f40
JH
1491 cancel_delayed_work(&hdev->power_off);
1492
a5c8f270
MH
1493 /* After this call it is guaranteed that the setup procedure
1494 * has finished. This means that error conditions like RFKILL
1495 * or no valid public or static random address apply.
1496 */
e1d08f40
JH
1497 flush_workqueue(hdev->req_workqueue);
1498
12aa4f0a 1499 /* For controllers not using the management interface and that
b6ae8457 1500 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
12aa4f0a
MH
1501 * so that pairing works for them. Once the management interface
1502 * is in use this bit will be cleared again and userspace has
1503 * to explicitly enable it.
1504 */
d7a5a11d
MH
1505 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1506 !hci_dev_test_flag(hdev, HCI_MGMT))
a1536da2 1507 hci_dev_set_flag(hdev, HCI_BONDABLE);
12aa4f0a 1508
cbed0ca1
JH
1509 err = hci_dev_do_open(hdev);
1510
fee746b0 1511done:
cbed0ca1 1512 hci_dev_put(hdev);
cbed0ca1
JH
1513 return err;
1514}
1515
d7347f3c
JH
1516/* This function requires the caller holds hdev->lock */
1517static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1518{
1519 struct hci_conn_params *p;
1520
f161dd41
JH
1521 list_for_each_entry(p, &hdev->le_conn_params, list) {
1522 if (p->conn) {
1523 hci_conn_drop(p->conn);
f8aaf9b6 1524 hci_conn_put(p->conn);
f161dd41
JH
1525 p->conn = NULL;
1526 }
d7347f3c 1527 list_del_init(&p->action);
f161dd41 1528 }
d7347f3c
JH
1529
1530 BT_DBG("All LE pending actions cleared");
1531}
1532
6b3cc1db 1533int hci_dev_do_close(struct hci_dev *hdev)
1da177e4 1534{
acc649c6
MH
1535 bool auto_off;
1536
1da177e4
LT
1537 BT_DBG("%s %p", hdev->name, hdev);
1538
d24d8144 1539 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
867146a0 1540 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
d24d8144 1541 test_bit(HCI_UP, &hdev->flags)) {
a44fecbd
THJA
1542 /* Execute vendor specific shutdown routine */
1543 if (hdev->shutdown)
1544 hdev->shutdown(hdev);
1545 }
1546
78c04c0b
VCG
1547 cancel_delayed_work(&hdev->power_off);
1548
7df0f73e 1549 hci_request_cancel_all(hdev);
b504430c 1550 hci_req_sync_lock(hdev);
1da177e4
LT
1551
1552 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 1553 cancel_delayed_work_sync(&hdev->cmd_timer);
b504430c 1554 hci_req_sync_unlock(hdev);
1da177e4
LT
1555 return 0;
1556 }
1557
6d5d2ee6
HK
1558 hci_leds_update_powered(hdev, false);
1559
3eff45ea
GP
1560 /* Flush RX and TX works */
1561 flush_work(&hdev->tx_work);
b78752cc 1562 flush_work(&hdev->rx_work);
1da177e4 1563
16ab91ab 1564 if (hdev->discov_timeout > 0) {
16ab91ab 1565 hdev->discov_timeout = 0;
a358dc11
MH
1566 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1567 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
16ab91ab
JH
1568 }
1569
a69d8927 1570 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
7d78525d
JH
1571 cancel_delayed_work(&hdev->service_cache);
1572
d7a5a11d 1573 if (hci_dev_test_flag(hdev, HCI_MGMT))
4518bb0f 1574 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 1575
76727c02
JH
1576 /* Avoid potential lockdep warnings from the *_flush() calls by
1577 * ensuring the workqueue is empty up front.
1578 */
1579 drain_workqueue(hdev->workqueue);
1580
09fd0de5 1581 hci_dev_lock(hdev);
1aeb9c65 1582
8f502f84
JH
1583 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1584
acc649c6
MH
1585 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1586
ca8bee5d 1587 if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
baab7932 1588 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
2ff13894
JH
1589 hci_dev_test_flag(hdev, HCI_MGMT))
1590 __mgmt_power_off(hdev);
1aeb9c65 1591
1f9b9a5d 1592 hci_inquiry_cache_flush(hdev);
d7347f3c 1593 hci_pend_le_actions_clear(hdev);
f161dd41 1594 hci_conn_hash_flush(hdev);
09fd0de5 1595 hci_dev_unlock(hdev);
1da177e4 1596
64dae967
MH
1597 smp_unregister(hdev);
1598
05fcd4c4 1599 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1da177e4
LT
1600
1601 if (hdev->flush)
1602 hdev->flush(hdev);
1603
1604 /* Reset device */
1605 skb_queue_purge(&hdev->cmd_q);
1606 atomic_set(&hdev->cmd_cnt, 1);
acc649c6
MH
1607 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1608 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1da177e4 1609 set_bit(HCI_INIT, &hdev->flags);
4ebeee2d 1610 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1da177e4
LT
1611 clear_bit(HCI_INIT, &hdev->flags);
1612 }
1613
c347b765
GP
1614 /* flush cmd work */
1615 flush_work(&hdev->cmd_work);
1da177e4
LT
1616
1617 /* Drop queues */
1618 skb_queue_purge(&hdev->rx_q);
1619 skb_queue_purge(&hdev->cmd_q);
1620 skb_queue_purge(&hdev->raw_q);
1621
1622 /* Drop last sent command */
1623 if (hdev->sent_cmd) {
65cc2b49 1624 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
1625 kfree_skb(hdev->sent_cmd);
1626 hdev->sent_cmd = NULL;
1627 }
1628
e9ca8bf1 1629 clear_bit(HCI_RUNNING, &hdev->flags);
05fcd4c4 1630 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
4a3f95b7 1631
1da177e4
LT
1632 /* After this point our queues are empty
1633 * and no tasks are scheduled. */
1634 hdev->close(hdev);
1635
35b973c9 1636 /* Clear flags */
fee746b0 1637 hdev->flags &= BIT(HCI_RAW);
eacb44df 1638 hci_dev_clear_volatile_flags(hdev);
35b973c9 1639
ced5c338 1640 /* Controller radio is available but is currently powered down */
536619e8 1641 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 1642
e59fda8d 1643 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 1644 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 1645 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 1646
b504430c 1647 hci_req_sync_unlock(hdev);
1da177e4
LT
1648
1649 hci_dev_put(hdev);
1650 return 0;
1651}
1652
1653int hci_dev_close(__u16 dev)
1654{
1655 struct hci_dev *hdev;
1656 int err;
1657
70f23020
AE
1658 hdev = hci_dev_get(dev);
1659 if (!hdev)
1da177e4 1660 return -ENODEV;
8ee56540 1661
d7a5a11d 1662 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1663 err = -EBUSY;
1664 goto done;
1665 }
1666
a69d8927 1667 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
8ee56540
MH
1668 cancel_delayed_work(&hdev->power_off);
1669
1da177e4 1670 err = hci_dev_do_close(hdev);
8ee56540 1671
0736cfa8 1672done:
1da177e4
LT
1673 hci_dev_put(hdev);
1674 return err;
1675}
1676
5c912495 1677static int hci_dev_do_reset(struct hci_dev *hdev)
1da177e4 1678{
5c912495 1679 int ret;
1da177e4 1680
5c912495 1681 BT_DBG("%s %p", hdev->name, hdev);
1da177e4 1682
b504430c 1683 hci_req_sync_lock(hdev);
1da177e4 1684
1da177e4
LT
1685 /* Drop queues */
1686 skb_queue_purge(&hdev->rx_q);
1687 skb_queue_purge(&hdev->cmd_q);
1688
76727c02
JH
1689 /* Avoid potential lockdep warnings from the *_flush() calls by
1690 * ensuring the workqueue is empty up front.
1691 */
1692 drain_workqueue(hdev->workqueue);
1693
09fd0de5 1694 hci_dev_lock(hdev);
1f9b9a5d 1695 hci_inquiry_cache_flush(hdev);
1da177e4 1696 hci_conn_hash_flush(hdev);
09fd0de5 1697 hci_dev_unlock(hdev);
1da177e4
LT
1698
1699 if (hdev->flush)
1700 hdev->flush(hdev);
1701
8e87d142 1702 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 1703 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 1704
4ebeee2d 1705 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1da177e4 1706
b504430c 1707 hci_req_sync_unlock(hdev);
1da177e4
LT
1708 return ret;
1709}
1710
5c912495
MH
1711int hci_dev_reset(__u16 dev)
1712{
1713 struct hci_dev *hdev;
1714 int err;
1715
1716 hdev = hci_dev_get(dev);
1717 if (!hdev)
1718 return -ENODEV;
1719
1720 if (!test_bit(HCI_UP, &hdev->flags)) {
1721 err = -ENETDOWN;
1722 goto done;
1723 }
1724
d7a5a11d 1725 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5c912495
MH
1726 err = -EBUSY;
1727 goto done;
1728 }
1729
d7a5a11d 1730 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5c912495
MH
1731 err = -EOPNOTSUPP;
1732 goto done;
1733 }
1734
1735 err = hci_dev_do_reset(hdev);
1736
1737done:
1738 hci_dev_put(hdev);
1739 return err;
1740}
1741
1da177e4
LT
1742int hci_dev_reset_stat(__u16 dev)
1743{
1744 struct hci_dev *hdev;
1745 int ret = 0;
1746
70f23020
AE
1747 hdev = hci_dev_get(dev);
1748 if (!hdev)
1da177e4
LT
1749 return -ENODEV;
1750
d7a5a11d 1751 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1752 ret = -EBUSY;
1753 goto done;
1754 }
1755
d7a5a11d 1756 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1757 ret = -EOPNOTSUPP;
1758 goto done;
1759 }
1760
1da177e4
LT
1761 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1762
0736cfa8 1763done:
1da177e4 1764 hci_dev_put(hdev);
1da177e4
LT
1765 return ret;
1766}
1767
123abc08
JH
1768static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1769{
bc6d2d04 1770 bool conn_changed, discov_changed;
123abc08
JH
1771
1772 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1773
1774 if ((scan & SCAN_PAGE))
238be788
MH
1775 conn_changed = !hci_dev_test_and_set_flag(hdev,
1776 HCI_CONNECTABLE);
123abc08 1777 else
a69d8927
MH
1778 conn_changed = hci_dev_test_and_clear_flag(hdev,
1779 HCI_CONNECTABLE);
123abc08 1780
bc6d2d04 1781 if ((scan & SCAN_INQUIRY)) {
238be788
MH
1782 discov_changed = !hci_dev_test_and_set_flag(hdev,
1783 HCI_DISCOVERABLE);
bc6d2d04 1784 } else {
a358dc11 1785 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
a69d8927
MH
1786 discov_changed = hci_dev_test_and_clear_flag(hdev,
1787 HCI_DISCOVERABLE);
bc6d2d04
JH
1788 }
1789
d7a5a11d 1790 if (!hci_dev_test_flag(hdev, HCI_MGMT))
123abc08
JH
1791 return;
1792
bc6d2d04
JH
1793 if (conn_changed || discov_changed) {
1794 /* In case this was disabled through mgmt */
a1536da2 1795 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
bc6d2d04 1796
d7a5a11d 1797 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
cab054ab 1798 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
bc6d2d04 1799
123abc08 1800 mgmt_new_settings(hdev);
bc6d2d04 1801 }
123abc08
JH
1802}
1803
1da177e4
LT
1804int hci_dev_cmd(unsigned int cmd, void __user *arg)
1805{
1806 struct hci_dev *hdev;
1807 struct hci_dev_req dr;
1808 int err = 0;
1809
1810 if (copy_from_user(&dr, arg, sizeof(dr)))
1811 return -EFAULT;
1812
70f23020
AE
1813 hdev = hci_dev_get(dr.dev_id);
1814 if (!hdev)
1da177e4
LT
1815 return -ENODEV;
1816
d7a5a11d 1817 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1818 err = -EBUSY;
1819 goto done;
1820 }
1821
d7a5a11d 1822 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1823 err = -EOPNOTSUPP;
1824 goto done;
1825 }
1826
ca8bee5d 1827 if (hdev->dev_type != HCI_PRIMARY) {
5b69bef5
MH
1828 err = -EOPNOTSUPP;
1829 goto done;
1830 }
1831
d7a5a11d 1832 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
1833 err = -EOPNOTSUPP;
1834 goto done;
1835 }
1836
1da177e4
LT
1837 switch (cmd) {
1838 case HCISETAUTH:
01178cd4 1839 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
4ebeee2d 1840 HCI_INIT_TIMEOUT, NULL);
1da177e4
LT
1841 break;
1842
1843 case HCISETENCRYPT:
1844 if (!lmp_encrypt_capable(hdev)) {
1845 err = -EOPNOTSUPP;
1846 break;
1847 }
1848
1849 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1850 /* Auth must be enabled first */
01178cd4 1851 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
4ebeee2d 1852 HCI_INIT_TIMEOUT, NULL);
1da177e4
LT
1853 if (err)
1854 break;
1855 }
1856
01178cd4 1857 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
4ebeee2d 1858 HCI_INIT_TIMEOUT, NULL);
1da177e4
LT
1859 break;
1860
1861 case HCISETSCAN:
01178cd4 1862 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
4ebeee2d 1863 HCI_INIT_TIMEOUT, NULL);
91a668b0 1864
bc6d2d04
JH
1865 /* Ensure that the connectable and discoverable states
1866 * get correctly modified as this was a non-mgmt change.
91a668b0 1867 */
123abc08
JH
1868 if (!err)
1869 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
1870 break;
1871
1da177e4 1872 case HCISETLINKPOL:
01178cd4 1873 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
4ebeee2d 1874 HCI_INIT_TIMEOUT, NULL);
1da177e4
LT
1875 break;
1876
1877 case HCISETLINKMODE:
e4e8e37c
MH
1878 hdev->link_mode = ((__u16) dr.dev_opt) &
1879 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1880 break;
1881
1882 case HCISETPTYPE:
1883 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
1884 break;
1885
1886 case HCISETACLMTU:
e4e8e37c
MH
1887 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1888 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1889 break;
1890
1891 case HCISETSCOMTU:
e4e8e37c
MH
1892 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1893 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1894 break;
1895
1896 default:
1897 err = -EINVAL;
1898 break;
1899 }
e4e8e37c 1900
0736cfa8 1901done:
1da177e4
LT
1902 hci_dev_put(hdev);
1903 return err;
1904}
1905
1906int hci_get_dev_list(void __user *arg)
1907{
8035ded4 1908 struct hci_dev *hdev;
1da177e4
LT
1909 struct hci_dev_list_req *dl;
1910 struct hci_dev_req *dr;
1da177e4
LT
1911 int n = 0, size, err;
1912 __u16 dev_num;
1913
1914 if (get_user(dev_num, (__u16 __user *) arg))
1915 return -EFAULT;
1916
1917 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1918 return -EINVAL;
1919
1920 size = sizeof(*dl) + dev_num * sizeof(*dr);
1921
70f23020
AE
1922 dl = kzalloc(size, GFP_KERNEL);
1923 if (!dl)
1da177e4
LT
1924 return -ENOMEM;
1925
1926 dr = dl->dev_req;
1927
f20d09d5 1928 read_lock(&hci_dev_list_lock);
8035ded4 1929 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 1930 unsigned long flags = hdev->flags;
c542a06c 1931
2e84d8db
MH
1932 /* When the auto-off is configured it means the transport
1933 * is running, but in that case still indicate that the
1934 * device is actually down.
1935 */
d7a5a11d 1936 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db 1937 flags &= ~BIT(HCI_UP);
c542a06c 1938
1da177e4 1939 (dr + n)->dev_id = hdev->id;
2e84d8db 1940 (dr + n)->dev_opt = flags;
c542a06c 1941
1da177e4
LT
1942 if (++n >= dev_num)
1943 break;
1944 }
f20d09d5 1945 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1946
1947 dl->dev_num = n;
1948 size = sizeof(*dl) + n * sizeof(*dr);
1949
1950 err = copy_to_user(arg, dl, size);
1951 kfree(dl);
1952
1953 return err ? -EFAULT : 0;
1954}
1955
1956int hci_get_dev_info(void __user *arg)
1957{
1958 struct hci_dev *hdev;
1959 struct hci_dev_info di;
2e84d8db 1960 unsigned long flags;
1da177e4
LT
1961 int err = 0;
1962
1963 if (copy_from_user(&di, arg, sizeof(di)))
1964 return -EFAULT;
1965
70f23020
AE
1966 hdev = hci_dev_get(di.dev_id);
1967 if (!hdev)
1da177e4
LT
1968 return -ENODEV;
1969
2e84d8db
MH
1970 /* When the auto-off is configured it means the transport
1971 * is running, but in that case still indicate that the
1972 * device is actually down.
1973 */
d7a5a11d 1974 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db
MH
1975 flags = hdev->flags & ~BIT(HCI_UP);
1976 else
1977 flags = hdev->flags;
c542a06c 1978
1da177e4
LT
1979 strcpy(di.name, hdev->name);
1980 di.bdaddr = hdev->bdaddr;
60f2a3ed 1981 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 1982 di.flags = flags;
1da177e4 1983 di.pkt_type = hdev->pkt_type;
572c7f84
JH
1984 if (lmp_bredr_capable(hdev)) {
1985 di.acl_mtu = hdev->acl_mtu;
1986 di.acl_pkts = hdev->acl_pkts;
1987 di.sco_mtu = hdev->sco_mtu;
1988 di.sco_pkts = hdev->sco_pkts;
1989 } else {
1990 di.acl_mtu = hdev->le_mtu;
1991 di.acl_pkts = hdev->le_pkts;
1992 di.sco_mtu = 0;
1993 di.sco_pkts = 0;
1994 }
1da177e4
LT
1995 di.link_policy = hdev->link_policy;
1996 di.link_mode = hdev->link_mode;
1997
1998 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1999 memcpy(&di.features, &hdev->features, sizeof(di.features));
2000
2001 if (copy_to_user(arg, &di, sizeof(di)))
2002 err = -EFAULT;
2003
2004 hci_dev_put(hdev);
2005
2006 return err;
2007}
2008
2009/* ---- Interface to HCI drivers ---- */
2010
611b30f7
MH
2011static int hci_rfkill_set_block(void *data, bool blocked)
2012{
2013 struct hci_dev *hdev = data;
2014
2015 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2016
d7a5a11d 2017 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
0736cfa8
MH
2018 return -EBUSY;
2019
5e130367 2020 if (blocked) {
a1536da2 2021 hci_dev_set_flag(hdev, HCI_RFKILLED);
d7a5a11d
MH
2022 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2023 !hci_dev_test_flag(hdev, HCI_CONFIG))
bf543036 2024 hci_dev_do_close(hdev);
5e130367 2025 } else {
a358dc11 2026 hci_dev_clear_flag(hdev, HCI_RFKILLED);
1025c04c 2027 }
611b30f7
MH
2028
2029 return 0;
2030}
2031
2032static const struct rfkill_ops hci_rfkill_ops = {
2033 .set_block = hci_rfkill_set_block,
2034};
2035
ab81cbf9
JH
2036static void hci_power_on(struct work_struct *work)
2037{
2038 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2039 int err;
ab81cbf9
JH
2040
2041 BT_DBG("%s", hdev->name);
2042
2ff13894
JH
2043 if (test_bit(HCI_UP, &hdev->flags) &&
2044 hci_dev_test_flag(hdev, HCI_MGMT) &&
2045 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
d82142a8 2046 cancel_delayed_work(&hdev->power_off);
2ff13894
JH
2047 hci_req_sync_lock(hdev);
2048 err = __hci_req_hci_power_on(hdev);
2049 hci_req_sync_unlock(hdev);
2050 mgmt_power_on(hdev, err);
2051 return;
2052 }
2053
cbed0ca1 2054 err = hci_dev_do_open(hdev);
96570ffc 2055 if (err < 0) {
3ad67582 2056 hci_dev_lock(hdev);
96570ffc 2057 mgmt_set_powered_failed(hdev, err);
3ad67582 2058 hci_dev_unlock(hdev);
ab81cbf9 2059 return;
96570ffc 2060 }
ab81cbf9 2061
a5c8f270
MH
2062 /* During the HCI setup phase, a few error conditions are
2063 * ignored and they need to be checked now. If they are still
2064 * valid, it is important to turn the device back off.
2065 */
d7a5a11d
MH
2066 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2067 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
ca8bee5d 2068 (hdev->dev_type == HCI_PRIMARY &&
a5c8f270
MH
2069 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2070 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
a358dc11 2071 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
bf543036 2072 hci_dev_do_close(hdev);
d7a5a11d 2073 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
19202573
JH
2074 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2075 HCI_AUTO_OFF_TIMEOUT);
bf543036 2076 }
ab81cbf9 2077
a69d8927 2078 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
4a964404
MH
2079 /* For unconfigured devices, set the HCI_RAW flag
2080 * so that userspace can easily identify them.
4a964404 2081 */
d7a5a11d 2082 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4a964404 2083 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
2084
2085 /* For fully configured devices, this will send
2086 * the Index Added event. For unconfigured devices,
2087 * it will send Unconfigued Index Added event.
2088 *
2089 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2090 * and no event will be send.
2091 */
2092 mgmt_index_added(hdev);
a69d8927 2093 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5ea234d3
MH
2094 /* When the controller is now configured, then it
2095 * is important to clear the HCI_RAW flag.
2096 */
d7a5a11d 2097 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5ea234d3
MH
2098 clear_bit(HCI_RAW, &hdev->flags);
2099
d603b76b
MH
2100 /* Powering on the controller with HCI_CONFIG set only
2101 * happens with the transition from unconfigured to
2102 * configured. This will send the Index Added event.
2103 */
744cf19e 2104 mgmt_index_added(hdev);
fee746b0 2105 }
ab81cbf9
JH
2106}
2107
2108static void hci_power_off(struct work_struct *work)
2109{
3243553f 2110 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2111 power_off.work);
ab81cbf9
JH
2112
2113 BT_DBG("%s", hdev->name);
2114
8ee56540 2115 hci_dev_do_close(hdev);
ab81cbf9
JH
2116}
2117
c7741d16
MH
2118static void hci_error_reset(struct work_struct *work)
2119{
2120 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2121
2122 BT_DBG("%s", hdev->name);
2123
2124 if (hdev->hw_error)
2125 hdev->hw_error(hdev, hdev->hw_error_code);
2126 else
2127 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2128 hdev->hw_error_code);
2129
2130 if (hci_dev_do_close(hdev))
2131 return;
2132
c7741d16
MH
2133 hci_dev_do_open(hdev);
2134}
2135
35f7498a 2136void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2137{
4821002c 2138 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2139
4821002c
JH
2140 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2141 list_del(&uuid->list);
2aeb9a1a
JH
2142 kfree(uuid);
2143 }
2aeb9a1a
JH
2144}
2145
35f7498a 2146void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1 2147{
0378b597 2148 struct link_key *key;
55ed8ca1 2149
0378b597
JH
2150 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2151 list_del_rcu(&key->list);
2152 kfree_rcu(key, rcu);
55ed8ca1 2153 }
55ed8ca1
JH
2154}
2155
35f7498a 2156void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 2157{
970d0f1b 2158 struct smp_ltk *k;
b899efaf 2159
970d0f1b
JH
2160 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2161 list_del_rcu(&k->list);
2162 kfree_rcu(k, rcu);
b899efaf 2163 }
b899efaf
VCG
2164}
2165
970c4e46
JH
2166void hci_smp_irks_clear(struct hci_dev *hdev)
2167{
adae20cb 2168 struct smp_irk *k;
970c4e46 2169
adae20cb
JH
2170 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2171 list_del_rcu(&k->list);
2172 kfree_rcu(k, rcu);
970c4e46
JH
2173 }
2174}
2175
55ed8ca1
JH
2176struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2177{
8035ded4 2178 struct link_key *k;
55ed8ca1 2179
0378b597
JH
2180 rcu_read_lock();
2181 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2182 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2183 rcu_read_unlock();
55ed8ca1 2184 return k;
0378b597
JH
2185 }
2186 }
2187 rcu_read_unlock();
55ed8ca1
JH
2188
2189 return NULL;
2190}
2191
745c0ce3 2192static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2193 u8 key_type, u8 old_key_type)
d25e28ab
JH
2194{
2195 /* Legacy key */
2196 if (key_type < 0x03)
745c0ce3 2197 return true;
d25e28ab
JH
2198
2199 /* Debug keys are insecure so don't store them persistently */
2200 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2201 return false;
d25e28ab
JH
2202
2203 /* Changed combination key and there's no previous one */
2204 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2205 return false;
d25e28ab
JH
2206
2207 /* Security mode 3 case */
2208 if (!conn)
745c0ce3 2209 return true;
d25e28ab 2210
e3befab9
JH
2211 /* BR/EDR key derived using SC from an LE link */
2212 if (conn->type == LE_LINK)
2213 return true;
2214
d25e28ab
JH
2215 /* Neither local nor remote side had no-bonding as requirement */
2216 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2217 return true;
d25e28ab
JH
2218
2219 /* Local side had dedicated bonding as requirement */
2220 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2221 return true;
d25e28ab
JH
2222
2223 /* Remote side had dedicated bonding as requirement */
2224 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2225 return true;
d25e28ab
JH
2226
2227 /* If none of the above criteria match, then don't store the key
2228 * persistently */
745c0ce3 2229 return false;
d25e28ab
JH
2230}
2231
e804d25d 2232static u8 ltk_role(u8 type)
98a0b845 2233{
e804d25d
JH
2234 if (type == SMP_LTK)
2235 return HCI_ROLE_MASTER;
98a0b845 2236
e804d25d 2237 return HCI_ROLE_SLAVE;
98a0b845
JH
2238}
2239
f3a73d97
JH
2240struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2241 u8 addr_type, u8 role)
75d262c2 2242{
c9839a11 2243 struct smp_ltk *k;
75d262c2 2244
970d0f1b
JH
2245 rcu_read_lock();
2246 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
5378bc56
JH
2247 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2248 continue;
2249
923e2414 2250 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
970d0f1b 2251 rcu_read_unlock();
75d262c2 2252 return k;
970d0f1b
JH
2253 }
2254 }
2255 rcu_read_unlock();
75d262c2
VCG
2256
2257 return NULL;
2258}
75d262c2 2259
970c4e46
JH
2260struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2261{
2262 struct smp_irk *irk;
2263
adae20cb
JH
2264 rcu_read_lock();
2265 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2266 if (!bacmp(&irk->rpa, rpa)) {
2267 rcu_read_unlock();
970c4e46 2268 return irk;
adae20cb 2269 }
970c4e46
JH
2270 }
2271
adae20cb 2272 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 2273 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 2274 bacpy(&irk->rpa, rpa);
adae20cb 2275 rcu_read_unlock();
970c4e46
JH
2276 return irk;
2277 }
2278 }
adae20cb 2279 rcu_read_unlock();
970c4e46
JH
2280
2281 return NULL;
2282}
2283
2284struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2285 u8 addr_type)
2286{
2287 struct smp_irk *irk;
2288
6cfc9988
JH
2289 /* Identity Address must be public or static random */
2290 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2291 return NULL;
2292
adae20cb
JH
2293 rcu_read_lock();
2294 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 2295 if (addr_type == irk->addr_type &&
adae20cb
JH
2296 bacmp(bdaddr, &irk->bdaddr) == 0) {
2297 rcu_read_unlock();
970c4e46 2298 return irk;
adae20cb 2299 }
970c4e46 2300 }
adae20cb 2301 rcu_read_unlock();
970c4e46
JH
2302
2303 return NULL;
2304}
2305
567fa2aa 2306struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
2307 bdaddr_t *bdaddr, u8 *val, u8 type,
2308 u8 pin_len, bool *persistent)
55ed8ca1
JH
2309{
2310 struct link_key *key, *old_key;
745c0ce3 2311 u8 old_key_type;
55ed8ca1
JH
2312
2313 old_key = hci_find_link_key(hdev, bdaddr);
2314 if (old_key) {
2315 old_key_type = old_key->type;
2316 key = old_key;
2317 } else {
12adcf3a 2318 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2319 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 2320 if (!key)
567fa2aa 2321 return NULL;
0378b597 2322 list_add_rcu(&key->list, &hdev->link_keys);
55ed8ca1
JH
2323 }
2324
6ed93dc6 2325 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2326
d25e28ab
JH
2327 /* Some buggy controller combinations generate a changed
2328 * combination key for legacy pairing even when there's no
2329 * previous key */
2330 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2331 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2332 type = HCI_LK_COMBINATION;
655fe6ec
JH
2333 if (conn)
2334 conn->key_type = type;
2335 }
d25e28ab 2336
55ed8ca1 2337 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2338 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2339 key->pin_len = pin_len;
2340
b6020ba0 2341 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2342 key->type = old_key_type;
4748fed2
JH
2343 else
2344 key->type = type;
2345
7652ff6a
JH
2346 if (persistent)
2347 *persistent = hci_persistent_key(hdev, conn, type,
2348 old_key_type);
4df378a1 2349
567fa2aa 2350 return key;
55ed8ca1
JH
2351}
2352
ca9142b8 2353struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 2354 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 2355 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 2356{
c9839a11 2357 struct smp_ltk *key, *old_key;
e804d25d 2358 u8 role = ltk_role(type);
75d262c2 2359
f3a73d97 2360 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
c9839a11 2361 if (old_key)
75d262c2 2362 key = old_key;
c9839a11 2363 else {
0a14ab41 2364 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2365 if (!key)
ca9142b8 2366 return NULL;
970d0f1b 2367 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2368 }
2369
75d262c2 2370 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2371 key->bdaddr_type = addr_type;
2372 memcpy(key->val, tk, sizeof(key->val));
2373 key->authenticated = authenticated;
2374 key->ediv = ediv;
fe39c7b2 2375 key->rand = rand;
c9839a11
VCG
2376 key->enc_size = enc_size;
2377 key->type = type;
75d262c2 2378
ca9142b8 2379 return key;
75d262c2
VCG
2380}
2381
ca9142b8
JH
2382struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2383 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
2384{
2385 struct smp_irk *irk;
2386
2387 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2388 if (!irk) {
2389 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2390 if (!irk)
ca9142b8 2391 return NULL;
970c4e46
JH
2392
2393 bacpy(&irk->bdaddr, bdaddr);
2394 irk->addr_type = addr_type;
2395
adae20cb 2396 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
2397 }
2398
2399 memcpy(irk->val, val, 16);
2400 bacpy(&irk->rpa, rpa);
2401
ca9142b8 2402 return irk;
970c4e46
JH
2403}
2404
55ed8ca1
JH
2405int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2406{
2407 struct link_key *key;
2408
2409 key = hci_find_link_key(hdev, bdaddr);
2410 if (!key)
2411 return -ENOENT;
2412
6ed93dc6 2413 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 2414
0378b597
JH
2415 list_del_rcu(&key->list);
2416 kfree_rcu(key, rcu);
55ed8ca1
JH
2417
2418 return 0;
2419}
2420
e0b2b27e 2421int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 2422{
970d0f1b 2423 struct smp_ltk *k;
c51ffa0b 2424 int removed = 0;
b899efaf 2425
970d0f1b 2426 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 2427 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2428 continue;
2429
6ed93dc6 2430 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 2431
970d0f1b
JH
2432 list_del_rcu(&k->list);
2433 kfree_rcu(k, rcu);
c51ffa0b 2434 removed++;
b899efaf
VCG
2435 }
2436
c51ffa0b 2437 return removed ? 0 : -ENOENT;
b899efaf
VCG
2438}
2439
a7ec7338
JH
2440void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2441{
adae20cb 2442 struct smp_irk *k;
a7ec7338 2443
adae20cb 2444 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
2445 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2446 continue;
2447
2448 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2449
adae20cb
JH
2450 list_del_rcu(&k->list);
2451 kfree_rcu(k, rcu);
a7ec7338
JH
2452 }
2453}
2454
55e76b38
JH
2455bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2456{
2457 struct smp_ltk *k;
4ba9faf3 2458 struct smp_irk *irk;
55e76b38
JH
2459 u8 addr_type;
2460
2461 if (type == BDADDR_BREDR) {
2462 if (hci_find_link_key(hdev, bdaddr))
2463 return true;
2464 return false;
2465 }
2466
2467 /* Convert to HCI addr type which struct smp_ltk uses */
2468 if (type == BDADDR_LE_PUBLIC)
2469 addr_type = ADDR_LE_DEV_PUBLIC;
2470 else
2471 addr_type = ADDR_LE_DEV_RANDOM;
2472
4ba9faf3
JH
2473 irk = hci_get_irk(hdev, bdaddr, addr_type);
2474 if (irk) {
2475 bdaddr = &irk->bdaddr;
2476 addr_type = irk->addr_type;
2477 }
2478
55e76b38
JH
2479 rcu_read_lock();
2480 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
87c8b28d
JH
2481 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2482 rcu_read_unlock();
55e76b38 2483 return true;
87c8b28d 2484 }
55e76b38
JH
2485 }
2486 rcu_read_unlock();
2487
2488 return false;
2489}
2490
6bd32326 2491/* HCI command timer function */
65cc2b49 2492static void hci_cmd_timeout(struct work_struct *work)
6bd32326 2493{
65cc2b49
MH
2494 struct hci_dev *hdev = container_of(work, struct hci_dev,
2495 cmd_timer.work);
6bd32326 2496
bda4f23a
AE
2497 if (hdev->sent_cmd) {
2498 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2499 u16 opcode = __le16_to_cpu(sent->opcode);
2500
2501 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2502 } else {
2503 BT_ERR("%s command tx timeout", hdev->name);
2504 }
2505
6bd32326 2506 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2507 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2508}
2509
2763eda6 2510struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 2511 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
2512{
2513 struct oob_data *data;
2514
6928a924
JH
2515 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2516 if (bacmp(bdaddr, &data->bdaddr) != 0)
2517 continue;
2518 if (data->bdaddr_type != bdaddr_type)
2519 continue;
2520 return data;
2521 }
2763eda6
SJ
2522
2523 return NULL;
2524}
2525
6928a924
JH
2526int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2527 u8 bdaddr_type)
2763eda6
SJ
2528{
2529 struct oob_data *data;
2530
6928a924 2531 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6
SJ
2532 if (!data)
2533 return -ENOENT;
2534
6928a924 2535 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2763eda6
SJ
2536
2537 list_del(&data->list);
2538 kfree(data);
2539
2540 return 0;
2541}
2542
35f7498a 2543void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2544{
2545 struct oob_data *data, *n;
2546
2547 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2548 list_del(&data->list);
2549 kfree(data);
2550 }
2763eda6
SJ
2551}
2552
0798872e 2553int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
6928a924 2554 u8 bdaddr_type, u8 *hash192, u8 *rand192,
81328d5c 2555 u8 *hash256, u8 *rand256)
2763eda6
SJ
2556{
2557 struct oob_data *data;
2558
6928a924 2559 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6 2560 if (!data) {
0a14ab41 2561 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
2562 if (!data)
2563 return -ENOMEM;
2564
2565 bacpy(&data->bdaddr, bdaddr);
6928a924 2566 data->bdaddr_type = bdaddr_type;
2763eda6
SJ
2567 list_add(&data->list, &hdev->remote_oob_data);
2568 }
2569
81328d5c
JH
2570 if (hash192 && rand192) {
2571 memcpy(data->hash192, hash192, sizeof(data->hash192));
2572 memcpy(data->rand192, rand192, sizeof(data->rand192));
f7697b16
MH
2573 if (hash256 && rand256)
2574 data->present = 0x03;
81328d5c
JH
2575 } else {
2576 memset(data->hash192, 0, sizeof(data->hash192));
2577 memset(data->rand192, 0, sizeof(data->rand192));
f7697b16
MH
2578 if (hash256 && rand256)
2579 data->present = 0x02;
2580 else
2581 data->present = 0x00;
0798872e
MH
2582 }
2583
81328d5c
JH
2584 if (hash256 && rand256) {
2585 memcpy(data->hash256, hash256, sizeof(data->hash256));
2586 memcpy(data->rand256, rand256, sizeof(data->rand256));
2587 } else {
2588 memset(data->hash256, 0, sizeof(data->hash256));
2589 memset(data->rand256, 0, sizeof(data->rand256));
f7697b16
MH
2590 if (hash192 && rand192)
2591 data->present = 0x01;
81328d5c 2592 }
0798872e 2593
6ed93dc6 2594 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2595
2596 return 0;
2597}
2598
d2609b34
FG
2599/* This function requires the caller holds hdev->lock */
2600struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2601{
2602 struct adv_info *adv_instance;
2603
2604 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2605 if (adv_instance->instance == instance)
2606 return adv_instance;
2607 }
2608
2609 return NULL;
2610}
2611
2612/* This function requires the caller holds hdev->lock */
74b93e9f
PK
2613struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2614{
d2609b34
FG
2615 struct adv_info *cur_instance;
2616
2617 cur_instance = hci_find_adv_instance(hdev, instance);
2618 if (!cur_instance)
2619 return NULL;
2620
2621 if (cur_instance == list_last_entry(&hdev->adv_instances,
2622 struct adv_info, list))
2623 return list_first_entry(&hdev->adv_instances,
2624 struct adv_info, list);
2625 else
2626 return list_next_entry(cur_instance, list);
2627}
2628
2629/* This function requires the caller holds hdev->lock */
2630int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2631{
2632 struct adv_info *adv_instance;
2633
2634 adv_instance = hci_find_adv_instance(hdev, instance);
2635 if (!adv_instance)
2636 return -ENOENT;
2637
2638 BT_DBG("%s removing %dMR", hdev->name, instance);
2639
cab054ab
JH
2640 if (hdev->cur_adv_instance == instance) {
2641 if (hdev->adv_instance_timeout) {
2642 cancel_delayed_work(&hdev->adv_instance_expire);
2643 hdev->adv_instance_timeout = 0;
2644 }
2645 hdev->cur_adv_instance = 0x00;
5d900e46
FG
2646 }
2647
d2609b34
FG
2648 list_del(&adv_instance->list);
2649 kfree(adv_instance);
2650
2651 hdev->adv_instance_cnt--;
2652
2653 return 0;
2654}
2655
2656/* This function requires the caller holds hdev->lock */
2657void hci_adv_instances_clear(struct hci_dev *hdev)
2658{
2659 struct adv_info *adv_instance, *n;
2660
5d900e46
FG
2661 if (hdev->adv_instance_timeout) {
2662 cancel_delayed_work(&hdev->adv_instance_expire);
2663 hdev->adv_instance_timeout = 0;
2664 }
2665
d2609b34
FG
2666 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2667 list_del(&adv_instance->list);
2668 kfree(adv_instance);
2669 }
2670
2671 hdev->adv_instance_cnt = 0;
cab054ab 2672 hdev->cur_adv_instance = 0x00;
d2609b34
FG
2673}
2674
2675/* This function requires the caller holds hdev->lock */
2676int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2677 u16 adv_data_len, u8 *adv_data,
2678 u16 scan_rsp_len, u8 *scan_rsp_data,
2679 u16 timeout, u16 duration)
2680{
2681 struct adv_info *adv_instance;
2682
2683 adv_instance = hci_find_adv_instance(hdev, instance);
2684 if (adv_instance) {
2685 memset(adv_instance->adv_data, 0,
2686 sizeof(adv_instance->adv_data));
2687 memset(adv_instance->scan_rsp_data, 0,
2688 sizeof(adv_instance->scan_rsp_data));
2689 } else {
2690 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2691 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2692 return -EOVERFLOW;
2693
39ecfad6 2694 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
d2609b34
FG
2695 if (!adv_instance)
2696 return -ENOMEM;
2697
fffd38bc 2698 adv_instance->pending = true;
d2609b34
FG
2699 adv_instance->instance = instance;
2700 list_add(&adv_instance->list, &hdev->adv_instances);
2701 hdev->adv_instance_cnt++;
2702 }
2703
2704 adv_instance->flags = flags;
2705 adv_instance->adv_data_len = adv_data_len;
2706 adv_instance->scan_rsp_len = scan_rsp_len;
2707
2708 if (adv_data_len)
2709 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2710
2711 if (scan_rsp_len)
2712 memcpy(adv_instance->scan_rsp_data,
2713 scan_rsp_data, scan_rsp_len);
2714
2715 adv_instance->timeout = timeout;
5d900e46 2716 adv_instance->remaining_time = timeout;
d2609b34
FG
2717
2718 if (duration == 0)
2719 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2720 else
2721 adv_instance->duration = duration;
2722
2723 BT_DBG("%s for %dMR", hdev->name, instance);
2724
2725 return 0;
2726}
2727
dcc36c16 2728struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 2729 bdaddr_t *bdaddr, u8 type)
b2a66aad 2730{
8035ded4 2731 struct bdaddr_list *b;
b2a66aad 2732
dcc36c16 2733 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 2734 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2735 return b;
b9ee0a78 2736 }
b2a66aad
AJ
2737
2738 return NULL;
2739}
2740
dcc36c16 2741void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad 2742{
7eb7404f 2743 struct bdaddr_list *b, *n;
b2a66aad 2744
7eb7404f
GT
2745 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2746 list_del(&b->list);
b2a66aad
AJ
2747 kfree(b);
2748 }
b2a66aad
AJ
2749}
2750
dcc36c16 2751int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2752{
2753 struct bdaddr_list *entry;
b2a66aad 2754
b9ee0a78 2755 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2756 return -EBADF;
2757
dcc36c16 2758 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 2759 return -EEXIST;
b2a66aad 2760
27f70f3e 2761 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
2762 if (!entry)
2763 return -ENOMEM;
b2a66aad
AJ
2764
2765 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2766 entry->bdaddr_type = type;
b2a66aad 2767
dcc36c16 2768 list_add(&entry->list, list);
b2a66aad 2769
2a8357f2 2770 return 0;
b2a66aad
AJ
2771}
2772
dcc36c16 2773int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2774{
2775 struct bdaddr_list *entry;
b2a66aad 2776
35f7498a 2777 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 2778 hci_bdaddr_list_clear(list);
35f7498a
JH
2779 return 0;
2780 }
b2a66aad 2781
dcc36c16 2782 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
2783 if (!entry)
2784 return -ENOENT;
2785
2786 list_del(&entry->list);
2787 kfree(entry);
2788
2789 return 0;
2790}
2791
15819a70
AG
2792/* This function requires the caller holds hdev->lock */
2793struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2794 bdaddr_t *addr, u8 addr_type)
2795{
2796 struct hci_conn_params *params;
2797
2798 list_for_each_entry(params, &hdev->le_conn_params, list) {
2799 if (bacmp(&params->addr, addr) == 0 &&
2800 params->addr_type == addr_type) {
2801 return params;
2802 }
2803 }
2804
2805 return NULL;
2806}
2807
4b10966f 2808/* This function requires the caller holds hdev->lock */
501f8827
JH
2809struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2810 bdaddr_t *addr, u8 addr_type)
a9b0a04c 2811{
912b42ef 2812 struct hci_conn_params *param;
a9b0a04c 2813
501f8827 2814 list_for_each_entry(param, list, action) {
912b42ef
JH
2815 if (bacmp(&param->addr, addr) == 0 &&
2816 param->addr_type == addr_type)
2817 return param;
4b10966f
MH
2818 }
2819
2820 return NULL;
a9b0a04c
AG
2821}
2822
15819a70 2823/* This function requires the caller holds hdev->lock */
51d167c0
MH
2824struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2825 bdaddr_t *addr, u8 addr_type)
15819a70
AG
2826{
2827 struct hci_conn_params *params;
2828
2829 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 2830 if (params)
51d167c0 2831 return params;
15819a70
AG
2832
2833 params = kzalloc(sizeof(*params), GFP_KERNEL);
2834 if (!params) {
2835 BT_ERR("Out of memory");
51d167c0 2836 return NULL;
15819a70
AG
2837 }
2838
2839 bacpy(&params->addr, addr);
2840 params->addr_type = addr_type;
cef952ce
AG
2841
2842 list_add(&params->list, &hdev->le_conn_params);
93450c75 2843 INIT_LIST_HEAD(&params->action);
cef952ce 2844
bf5b3c8b
MH
2845 params->conn_min_interval = hdev->le_conn_min_interval;
2846 params->conn_max_interval = hdev->le_conn_max_interval;
2847 params->conn_latency = hdev->le_conn_latency;
2848 params->supervision_timeout = hdev->le_supv_timeout;
2849 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2850
2851 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2852
51d167c0 2853 return params;
bf5b3c8b
MH
2854}
2855
f6c63249 2856static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 2857{
f8aaf9b6 2858 if (params->conn) {
f161dd41 2859 hci_conn_drop(params->conn);
f8aaf9b6
JH
2860 hci_conn_put(params->conn);
2861 }
f161dd41 2862
95305baa 2863 list_del(&params->action);
15819a70
AG
2864 list_del(&params->list);
2865 kfree(params);
f6c63249
JH
2866}
2867
2868/* This function requires the caller holds hdev->lock */
2869void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2870{
2871 struct hci_conn_params *params;
2872
2873 params = hci_conn_params_lookup(hdev, addr, addr_type);
2874 if (!params)
2875 return;
2876
2877 hci_conn_params_free(params);
15819a70 2878
95305baa
JH
2879 hci_update_background_scan(hdev);
2880
15819a70
AG
2881 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2882}
2883
2884/* This function requires the caller holds hdev->lock */
55af49a8 2885void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
2886{
2887 struct hci_conn_params *params, *tmp;
2888
2889 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
2890 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2891 continue;
f75113a2
JP
2892
2893 /* If trying to estabilish one time connection to disabled
2894 * device, leave the params, but mark them as just once.
2895 */
2896 if (params->explicit_connect) {
2897 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2898 continue;
2899 }
2900
15819a70
AG
2901 list_del(&params->list);
2902 kfree(params);
2903 }
2904
55af49a8 2905 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
2906}
2907
2908/* This function requires the caller holds hdev->lock */
030e7f81 2909static void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 2910{
15819a70 2911 struct hci_conn_params *params, *tmp;
77a77a30 2912
f6c63249
JH
2913 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2914 hci_conn_params_free(params);
77a77a30 2915
15819a70 2916 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
2917}
2918
a1f4c318
JH
2919/* Copy the Identity Address of the controller.
2920 *
2921 * If the controller has a public BD_ADDR, then by default use that one.
2922 * If this is a LE only controller without a public address, default to
2923 * the static random address.
2924 *
2925 * For debugging purposes it is possible to force controllers with a
2926 * public address to use the static random address instead.
50b5b952
MH
2927 *
2928 * In case BR/EDR has been disabled on a dual-mode controller and
2929 * userspace has configured a static address, then that address
2930 * becomes the identity address instead of the public BR/EDR address.
a1f4c318
JH
2931 */
2932void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2933 u8 *bdaddr_type)
2934{
b7cb93e5 2935 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
50b5b952 2936 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
d7a5a11d 2937 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
50b5b952 2938 bacmp(&hdev->static_addr, BDADDR_ANY))) {
a1f4c318
JH
2939 bacpy(bdaddr, &hdev->static_addr);
2940 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2941 } else {
2942 bacpy(bdaddr, &hdev->bdaddr);
2943 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2944 }
2945}
2946
9be0dab7
DH
/* Alloc HCI device
 *
 * Allocate a zeroed struct hci_dev and initialize every default value,
 * lock, list head, work item and queue it needs before registration.
 * Returns the new device or NULL on allocation failure.  The caller is
 * expected to fill in the transport callbacks and then call
 * hci_register_dev(); hci_free_dev() releases the structure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR defaults used until the controller reports its
	 * real capabilities during init.
	 */
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults (advertising, scanning, connection and data length
	 * parameters) in controller units.
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Lists of keys, filters and pending operations owned by hdev */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	/* Work items processed on the workqueues set up at register time */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3034
/* Free HCI device
 *
 * Drops the sysfs device reference; the actual memory is released by
 * the driver-core release callback once the last reference goes away,
 * so this must not be followed by any further use of hdev.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3042
1da177e4
LT
/* Register HCI device
 *
 * Makes an allocated hdev visible to the rest of the stack: assigns an
 * index, creates the workqueues, adds the sysfs device, sets up rfkill
 * and queues the initial power-on.  Returns the non-negative device id
 * on success or a negative errno.  The transport must have provided
 * open/close/send callbacks before calling this.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Dedicated single-threaded queues: one for RX/TX/cmd work, one
	 * for request processing (power on/off, hci_req_sync users).
	 */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	/* rfkill is optional: registration failure just means the device
	 * runs without rfkill support.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3148
/* Unregister HCI device
 *
 * Reverse of hci_register_dev(): removes hdev from the global list,
 * shuts the device down, tears down mgmt state, sysfs, workqueues and
 * all per-device lists, then drops the reference taken at registration.
 * The teardown order matters: the device is closed and mgmt is notified
 * before the workqueues and lists backing them are destroyed.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	/* Remember the index; hdev may be freed by hci_dev_put() below
	 * before the ida entry is released.
	 */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_dev_do_close(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Empty every per-device list under the device lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3215
/* Suspend HCI device
 *
 * Only notifies HCI socket listeners of the suspend transition; no
 * controller state is changed here.  Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3223
/* Resume HCI device
 *
 * Counterpart of hci_suspend_dev(): notifies HCI socket listeners of
 * the resume transition.  Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3231
75e0569f
MH
/* Reset HCI device
 *
 * Injects a synthetic HCI Hardware Error event (code 0x10, plen 1,
 * error 0x00) into the RX path, which causes the upper stack to run
 * its error-reset handling as if the controller had reported the
 * failure itself.  Returns 0 on success or -ENOMEM if the event skb
 * cannot be allocated.
 */
int hci_reset_dev(struct hci_dev *hdev)
{
	/* Event header (HCI_EV_HARDWARE_ERROR, length 1) + hw code 0x00 */
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
3249
/* Receive frame from HCI drivers
 *
 * Entry point for transport drivers to hand a received packet to the
 * core.  The skb is consumed in all cases: queued onto rx_q on success,
 * freed with -ENXIO if the device is neither up nor initializing, or
 * freed with -EINVAL for packet types other than event/ACL/SCO.
 * Actual processing happens asynchronously in hci_rx_work().
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Only the three inbound HCI packet types are accepted here;
	 * diagnostic packets go through hci_recv_diag() instead.
	 */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
3278
e875ff84
MH
/* Receive diagnostic message from HCI drivers
 *
 * Queues a vendor diagnostic packet onto the RX queue.  Unlike
 * hci_recv_frame() there is no HCI_UP/HCI_INIT gate and no packet-type
 * validation here; the skb is unconditionally marked as HCI_DIAG_PKT.
 * Always returns 0.
 */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
3294
5177a838
MH
/* Set the hardware information string for a device.
 *
 * Formats the printf-style arguments into hdev->hw_info, releasing any
 * previously stored string first.  kvasprintf_const()/kfree_const()
 * are used so that plain "%s" of a constant string avoids a copy.
 */
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);
3305
/* Set the firmware information string for a device.
 *
 * Same contract as hci_set_hw_info(), but targets hdev->fw_info.
 */
void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
3316
1da177e4
LT
3317/* ---- Interface to upper protocols ---- */
3318
1da177e4
LT
/* Register an upper-protocol callback structure.
 *
 * Appends cb to the global hci_cb_list under hci_cb_list_lock so the
 * core can deliver connection events to the protocol.  Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3330
/* Unregister an upper-protocol callback structure.
 *
 * Removes cb from hci_cb_list under the list mutex.  Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3342
/* Hand a fully built packet to the transport driver.
 *
 * Timestamps the skb, mirrors it to the monitor channel (and to raw
 * sockets when in promiscuous mode) before sending, then passes it to
 * hdev->send().  The skb is consumed: either by the driver or freed
 * here when the device is not running or the driver reports an error.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Drop silently if the transport has already been closed */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
3375
/* Send HCI command
 *
 * Builds a command skb for the given opcode/parameters and queues it
 * on cmd_q for asynchronous transmission by hci_cmd_work().  Returns 0
 * on success or -ENOMEM if the skb cannot be built.  For synchronous
 * completion handling use hci_cmd_sync() instead.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4
LT
3400
/* Get data from the previously sent command
 *
 * Returns a pointer to the parameter bytes of the last command that
 * was handed to the controller, or NULL if no command is outstanding
 * or its opcode does not match the one requested.  The returned
 * pointer aliases hdev->sent_cmd and is only valid while that skb
 * is held by the core.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
3418
fbef168f
LP
/* Send HCI command and wait for command complete event
 *
 * Serialized wrapper around __hci_cmd_sync(): takes the request lock,
 * sends the command and blocks until completion or timeout.  Returns
 * the completion event skb on success or an ERR_PTR; in particular
 * ERR_PTR(-ENETDOWN) when the device is not up.  Must be called from
 * sleepable context.
 */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
3437
1da177e4
LT
/* Send ACL data */
/* Prepend an ACL header (handle+flags, payload length) to skb.
 * The handle and packet-boundary/broadcast flags are packed into a
 * single 16-bit little-endian field via hci_handle_pack().
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
3450
/* Queue an (possibly fragmented) ACL packet onto a channel's data queue.
 *
 * Adds the ACL header to the head skb — using the connection handle on
 * primary controllers and the channel handle on AMP controllers — and,
 * when the skb carries a frag_list, converts each fragment into a
 * stand-alone ACL_CONT packet.  All fragments are queued atomically so
 * the TX scheduler never sees a partial packet.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments are sent
	 * as separate packets below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT instead of ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3512
/* Send ACL data on a channel: queue the skb and kick the TX worker.
 * The skb is consumed; transmission happens asynchronously subject to
 * the controller's flow control.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
3523
/* Send SCO data */
/* Prepend a SCO header and queue the skb for transmission on conn.
 * NOTE(review): hdr.dlen is assigned from skb->len and the SCO header
 * length field is narrower than an int — callers are presumably
 * expected to keep payloads within the SCO MTU; confirm against the
 * callers before relying on this.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
3544
3545/* ---- HCI TX task (outgoing data) ---- */
3546
/* HCI Connection scheduler */
/* Pick the connection of the given link type with the fewest in-flight
 * packets (least-recently-served fairness) among those that have queued
 * data and are in a sendable state.  On success *quote is set to this
 * connection's share of the available controller buffers (at least 1);
 * otherwise *quote is 0 and NULL is returned.  Traversal is RCU-safe.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen — stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available buffer credits depend on the link type; LE
		 * falls back to the ACL pool when no dedicated LE
		 * buffers were reported.
		 */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3607
/* TX timeout handler for a link type: disconnect every connection of
 * that type which still has unacknowledged packets, since the
 * controller has stopped returning completed-packet credits for them.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
3628
6039aa73
GP
/* Channel-level TX scheduler.
 *
 * Scans all connections of the given type and selects a channel whose
 * head-of-queue skb has the highest priority; ties are broken in favor
 * of the connection with the fewest in-flight packets.  *quote receives
 * that channel's share of the available buffer credits (at least 1).
 * Returns NULL when no channel has queued data.  Traversal is RCU-safe.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Credits depend on link type; AMP uses block-based accounting */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3710
02b20f0b
LAD
/* Anti-starvation pass run after a scheduling round.
 *
 * For every connection of the given type: channels that transmitted
 * this round get their sent counter reset, while channels that were
 * skipped have their head-of-queue skb promoted to HCI_PRIO_MAX - 1
 * so they are favored on the next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was served this round: reset and skip */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3760
b71d385a
AE
3761static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3762{
3763 /* Calculate count of blocks used by this packet */
3764 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3765}
3766
/* If the ACL buffer credits have been exhausted for longer than the
 * ACL TX timeout, assume the link stalled and kill its connections.
 * Skipped on unconfigured devices.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 3777
/* Packet-based ACL scheduler.
 *
 * Repeatedly selects the best channel via hci_chan_sent() and sends up
 * to its quota of packets, decrementing the shared acl_cnt credit for
 * each.  A channel's turn ends early when a lower-priority skb reaches
 * the head of its queue.  Runs the priority recalculation pass if any
 * packet was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3815
/* Block-based ACL scheduler (flow control mode used by AMP-style
 * controllers): like hci_sched_acl_pkt() but credits are counted in
 * data blocks, so each packet consumes __get_blocks() credits and a
 * packet larger than the remaining block budget ends the round.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP links; everything else is ACL */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3869
/* Dispatch ACL scheduling to the packet- or block-based variant
 * depending on the controller's flow control mode; bails out early
 * when there is nothing of the relevant link type to schedule.
 */
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
3892
/* Schedule SCO */
/* Drain queued SCO data, round-robin over the least-busy connection
 * returned by hci_low_sent(), while sco_cnt credits remain.  The sent
 * counter wraps back to 0 at ~0 (SCO credits are not returned by the
 * controller the way ACL credits are).
 */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3916
/* eSCO scheduler — identical to hci_sched_sco() but operates on
 * ESCO_LINK connections (both share the sco_cnt credit pool).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3940
/* LE scheduler.
 *
 * Mirrors hci_sched_acl_pkt() for LE links: applies the LE stall
 * timeout, then sends per-channel quotas ordered by skb priority.
 * Controllers without a dedicated LE buffer pool (le_pkts == 0) share
 * the ACL credit pool, which is why the remaining count is written
 * back to le_cnt or acl_cnt accordingly.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3991
/* TX worker: runs every per-link-type scheduler and then flushes any
 * raw (unknown type) packets straight to the driver.  Scheduling is
 * skipped entirely while the device is bound to a user channel, since
 * userspace then owns the controller.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4012
25985edc 4013/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4014
/* ACL data packet */
/* RX handler for inbound ACL data: strips the ACL header, resolves the
 * connection from the handle and hands the payload to L2CAP (which
 * takes ownership of the skb).  Unknown handles are logged and the skb
 * is dropped.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs the 12-bit handle with the PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
4050
/* SCO data packet */
/* RX handler for inbound SCO data: strips the SCO header, resolves the
 * connection and forwards the payload to the SCO layer (which takes
 * ownership of the skb).  Unknown handles are logged and dropped.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
4081
9238f36a
JH
4082static bool hci_req_is_complete(struct hci_dev *hdev)
4083{
4084 struct sk_buff *skb;
4085
4086 skb = skb_peek(&hdev->cmd_q);
4087 if (!skb)
4088 return true;
4089
44d27137 4090 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
9238f36a
JH
4091}
4092
42c6b129
JH
4093static void hci_resend_last(struct hci_dev *hdev)
4094{
4095 struct hci_command_hdr *sent;
4096 struct sk_buff *skb;
4097 u16 opcode;
4098
4099 if (!hdev->sent_cmd)
4100 return;
4101
4102 sent = (void *) hdev->sent_cmd->data;
4103 opcode = __le16_to_cpu(sent->opcode);
4104 if (opcode == HCI_OP_RESET)
4105 return;
4106
4107 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4108 if (!skb)
4109 return;
4110
4111 skb_queue_head(&hdev->cmd_q, skb);
4112 queue_work(hdev->workqueue, &hdev->cmd_work);
4113}
4114
e6214487
JH
4115void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4116 hci_req_complete_t *req_complete,
4117 hci_req_complete_skb_t *req_complete_skb)
9238f36a 4118{
9238f36a
JH
4119 struct sk_buff *skb;
4120 unsigned long flags;
4121
4122 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4123
42c6b129
JH
4124 /* If the completed command doesn't match the last one that was
4125 * sent we need to do special handling of it.
9238f36a 4126 */
42c6b129
JH
4127 if (!hci_sent_cmd_data(hdev, opcode)) {
4128 /* Some CSR based controllers generate a spontaneous
4129 * reset complete event during init and any pending
4130 * command will never be completed. In such a case we
4131 * need to resend whatever was the last sent
4132 * command.
4133 */
4134 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4135 hci_resend_last(hdev);
4136
9238f36a 4137 return;
42c6b129 4138 }
9238f36a
JH
4139
4140 /* If the command succeeded and there's still more commands in
4141 * this request the request is not yet complete.
4142 */
4143 if (!status && !hci_req_is_complete(hdev))
4144 return;
4145
4146 /* If this was the last command in a request the complete
4147 * callback would be found in hdev->sent_cmd instead of the
4148 * command queue (hdev->cmd_q).
4149 */
44d27137
JH
4150 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4151 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
e6214487
JH
4152 return;
4153 }
53e21fbc 4154
44d27137
JH
4155 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4156 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
e6214487 4157 return;
9238f36a
JH
4158 }
4159
4160 /* Remove all pending commands belonging to this request */
4161 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4162 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
44d27137 4163 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
9238f36a
JH
4164 __skb_queue_head(&hdev->cmd_q, skb);
4165 break;
4166 }
4167
3bd7594e
DA
4168 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4169 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4170 else
4171 *req_complete = bt_cb(skb)->hci.req_complete;
9238f36a
JH
4172 kfree_skb(skb);
4173 }
4174 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
9238f36a
JH
4175}
4176
b78752cc 4177static void hci_rx_work(struct work_struct *work)
1da177e4 4178{
b78752cc 4179 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4180 struct sk_buff *skb;
4181
4182 BT_DBG("%s", hdev->name);
4183
1da177e4 4184 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4185 /* Send copy to monitor */
4186 hci_send_to_monitor(hdev, skb);
4187
1da177e4
LT
4188 if (atomic_read(&hdev->promisc)) {
4189 /* Send copy to the sockets */
470fe1b5 4190 hci_send_to_sock(hdev, skb);
1da177e4
LT
4191 }
4192
d7a5a11d 4193 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1da177e4
LT
4194 kfree_skb(skb);
4195 continue;
4196 }
4197
4198 if (test_bit(HCI_INIT, &hdev->flags)) {
4199 /* Don't process data packets in this states. */
d79f34e3 4200 switch (hci_skb_pkt_type(skb)) {
1da177e4
LT
4201 case HCI_ACLDATA_PKT:
4202 case HCI_SCODATA_PKT:
4203 kfree_skb(skb);
4204 continue;
3ff50b79 4205 }
1da177e4
LT
4206 }
4207
4208 /* Process frame */
d79f34e3 4209 switch (hci_skb_pkt_type(skb)) {
1da177e4 4210 case HCI_EVENT_PKT:
b78752cc 4211 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4212 hci_event_packet(hdev, skb);
4213 break;
4214
4215 case HCI_ACLDATA_PKT:
4216 BT_DBG("%s ACL data packet", hdev->name);
4217 hci_acldata_packet(hdev, skb);
4218 break;
4219
4220 case HCI_SCODATA_PKT:
4221 BT_DBG("%s SCO data packet", hdev->name);
4222 hci_scodata_packet(hdev, skb);
4223 break;
4224
4225 default:
4226 kfree_skb(skb);
4227 break;
4228 }
4229 }
1da177e4
LT
4230}
4231
c347b765 4232static void hci_cmd_work(struct work_struct *work)
1da177e4 4233{
c347b765 4234 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
4235 struct sk_buff *skb;
4236
2104786b
AE
4237 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4238 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4239
1da177e4 4240 /* Send queued commands */
5a08ecce
AE
4241 if (atomic_read(&hdev->cmd_cnt)) {
4242 skb = skb_dequeue(&hdev->cmd_q);
4243 if (!skb)
4244 return;
4245
7585b97a 4246 kfree_skb(hdev->sent_cmd);
1da177e4 4247
a675d7f1 4248 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 4249 if (hdev->sent_cmd) {
1da177e4 4250 atomic_dec(&hdev->cmd_cnt);
57d17d70 4251 hci_send_frame(hdev, skb);
7bdb8a5c 4252 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 4253 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 4254 else
65cc2b49
MH
4255 schedule_delayed_work(&hdev->cmd_timer,
4256 HCI_CMD_TIMEOUT);
1da177e4
LT
4257 } else {
4258 skb_queue_head(&hdev->cmd_q, skb);
c347b765 4259 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4260 }
4261 }
4262}