Bluetooth: advmon offload MSFT add monitor
[linux-2.6-block.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
8c520a59 29#include <linux/rfkill.h>
baf27f6e 30#include <linux/debugfs.h>
99780a7b 31#include <linux/crypto.h>
7a0e5b15 32#include <linux/property.h>
9952d90e
APS
33#include <linux/suspend.h>
34#include <linux/wait.h>
47219839 35#include <asm/unaligned.h>
1da177e4
LT
36
37#include <net/bluetooth/bluetooth.h>
38#include <net/bluetooth/hci_core.h>
4bc58f51 39#include <net/bluetooth/l2cap.h>
af58925c 40#include <net/bluetooth/mgmt.h>
1da177e4 41
0857dd3b 42#include "hci_request.h"
60c5f5fb 43#include "hci_debugfs.h"
970c4e46 44#include "smp.h"
6d5d2ee6 45#include "leds.h"
145373cb 46#include "msft.h"
970c4e46 47
b78752cc 48static void hci_rx_work(struct work_struct *work);
c347b765 49static void hci_cmd_work(struct work_struct *work);
3eff45ea 50static void hci_tx_work(struct work_struct *work);
1da177e4 51
1da177e4
LT
52/* HCI device list */
53LIST_HEAD(hci_dev_list);
54DEFINE_RWLOCK(hci_dev_list_lock);
55
56/* HCI callback list */
57LIST_HEAD(hci_cb_list);
fba7ecf0 58DEFINE_MUTEX(hci_cb_list_lock);
1da177e4 59
3df92b31
SL
60/* HCI ID Numbering */
61static DEFINE_IDA(hci_index_ida);
62
baf27f6e
MH
63/* ---- HCI debugfs entries ---- */
64
4b4148e9
MH
/* debugfs read for "dut_mode": reports 'Y' or 'N' (plus newline) depending
 * on whether the HCI_DUT_MODE flag is currently set on the device.
 */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	/* Only the two visible characters are exposed to userspace */
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
76
/* debugfs write for "dut_mode": parses a boolean from userspace and toggles
 * Device Under Test mode. Enabling sends HCI_OP_ENABLE_DUT_MODE; disabling
 * resets the controller via HCI_OP_RESET. Requires the device to be up.
 *
 * Returns the consumed byte count on success or a negative errno.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	/* Nothing to do if the requested state is already active */
	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* The command complete skb carries no data we need; just free it */
	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}
113
/* File operations for the "dut_mode" debugfs entry */
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
120
4b4113d6
MH
/* debugfs read for "vendor_diag": reports 'Y' or 'N' (plus newline)
 * depending on whether the HCI_VENDOR_DIAG flag is currently set.
 */
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	/* Only the two visible characters are exposed to userspace */
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
132
/* debugfs write for "vendor_diag": parses a boolean and enables or disables
 * vendor-specific diagnostics via the driver's set_diag callback, then
 * mirrors the result in the HCI_VENDOR_DIAG flag.
 *
 * Returns the consumed byte count on success or a negative errno.
 */
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active or in user channel operation, then there is no need
	 * for the vendor callback. Instead just store the desired value and
	 * the setting will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	/* Keep the flag in sync with the requested state */
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}
169
/* File operations for the "vendor_diag" debugfs entry */
static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};
176
f640ee98
MH
/* Create the basic per-device debugfs entries: "dut_mode" always, and
 * "vendor_diag" only when the driver provides a set_diag callback.
 */
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}
186
a1d01db1 187static int hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 188{
42c6b129 189 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
190
191 /* Reset device */
42c6b129
JH
192 set_bit(HCI_RESET, &req->hdev->flags);
193 hci_req_add(req, HCI_OP_RESET, 0, NULL);
a1d01db1 194 return 0;
1da177e4
LT
195}
196
42c6b129 197static void bredr_init(struct hci_request *req)
1da177e4 198{
42c6b129 199 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 200
1da177e4 201 /* Read Local Supported Features */
42c6b129 202 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 203
1143e5a6 204 /* Read Local Version */
42c6b129 205 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
206
207 /* Read BD Address */
42c6b129 208 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
209}
210
0af801b9 211static void amp_init1(struct hci_request *req)
e61ef499 212{
42c6b129 213 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 214
e61ef499 215 /* Read Local Version */
42c6b129 216 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489 217
f6996cfe
MH
218 /* Read Local Supported Commands */
219 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
220
6bcbc489 221 /* Read Local AMP Info */
42c6b129 222 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
223
224 /* Read Data Blk size */
42c6b129 225 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
7528ca1c 226
f38ba941
MH
227 /* Read Flow Control Mode */
228 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
229
7528ca1c
MH
230 /* Read Location Data */
231 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
e61ef499
AE
232}
233
a1d01db1 234static int amp_init2(struct hci_request *req)
0af801b9
JH
235{
236 /* Read Local Supported Features. Not all AMP controllers
237 * support this so it's placed conditionally in the second
238 * stage init.
239 */
240 if (req->hdev->commands[14] & 0x20)
241 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
a1d01db1
JH
242
243 return 0;
0af801b9
JH
244}
245
a1d01db1 246static int hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 247{
42c6b129 248 struct hci_dev *hdev = req->hdev;
e61ef499
AE
249
250 BT_DBG("%s %ld", hdev->name, opt);
251
11778716
AE
252 /* Reset */
253 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 254 hci_reset_req(req, 0);
11778716 255
e61ef499 256 switch (hdev->dev_type) {
ca8bee5d 257 case HCI_PRIMARY:
42c6b129 258 bredr_init(req);
e61ef499 259 break;
e61ef499 260 case HCI_AMP:
0af801b9 261 amp_init1(req);
e61ef499 262 break;
e61ef499 263 default:
2064ee33 264 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
e61ef499
AE
265 break;
266 }
a1d01db1
JH
267
268 return 0;
e61ef499
AE
269}
270
42c6b129 271static void bredr_setup(struct hci_request *req)
2177bab5 272{
2177bab5
JH
273 __le16 param;
274 __u8 flt_type;
275
276 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 277 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
278
279 /* Read Class of Device */
42c6b129 280 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
281
282 /* Read Local Name */
42c6b129 283 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
284
285 /* Read Voice Setting */
42c6b129 286 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5 287
b4cb9fb2
MH
288 /* Read Number of Supported IAC */
289 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
290
4b836f39
MH
291 /* Read Current IAC LAP */
292 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
293
2177bab5
JH
294 /* Clear Event Filters */
295 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 296 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
297
298 /* Connection accept timeout ~20 secs */
dcf4adbf 299 param = cpu_to_le16(0x7d00);
42c6b129 300 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5
JH
301}
302
42c6b129 303static void le_setup(struct hci_request *req)
2177bab5 304{
c73eee91
JH
305 struct hci_dev *hdev = req->hdev;
306
2177bab5 307 /* Read LE Buffer Size */
42c6b129 308 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
309
310 /* Read LE Local Supported Features */
42c6b129 311 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
2177bab5 312
747d3f03
MH
313 /* Read LE Supported States */
314 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
315
c73eee91
JH
316 /* LE-only controllers have LE implicitly enabled */
317 if (!lmp_bredr_capable(hdev))
a1536da2 318 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2177bab5
JH
319}
320
42c6b129 321static void hci_setup_event_mask(struct hci_request *req)
2177bab5 322{
42c6b129
JH
323 struct hci_dev *hdev = req->hdev;
324
2177bab5
JH
325 /* The second byte is 0xff instead of 0x9f (two reserved bits
326 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
327 * command otherwise.
328 */
329 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
330
331 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
332 * any event mask for pre 1.2 devices.
333 */
334 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
335 return;
336
337 if (lmp_bredr_capable(hdev)) {
338 events[4] |= 0x01; /* Flow Specification Complete */
c7882cbd
MH
339 } else {
340 /* Use a different default for LE-only devices */
341 memset(events, 0, sizeof(events));
c7882cbd
MH
342 events[1] |= 0x20; /* Command Complete */
343 events[1] |= 0x40; /* Command Status */
344 events[1] |= 0x80; /* Hardware Error */
5c3d3b4c
MH
345
346 /* If the controller supports the Disconnect command, enable
347 * the corresponding event. In addition enable packet flow
348 * control related events.
349 */
350 if (hdev->commands[0] & 0x20) {
351 events[0] |= 0x10; /* Disconnection Complete */
352 events[2] |= 0x04; /* Number of Completed Packets */
353 events[3] |= 0x02; /* Data Buffer Overflow */
354 }
355
356 /* If the controller supports the Read Remote Version
357 * Information command, enable the corresponding event.
358 */
359 if (hdev->commands[2] & 0x80)
360 events[1] |= 0x08; /* Read Remote Version Information
361 * Complete
362 */
0da71f1b
MH
363
364 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
365 events[0] |= 0x80; /* Encryption Change */
366 events[5] |= 0x80; /* Encryption Key Refresh Complete */
367 }
2177bab5
JH
368 }
369
9fe759ce
MH
370 if (lmp_inq_rssi_capable(hdev) ||
371 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
2177bab5
JH
372 events[4] |= 0x02; /* Inquiry Result with RSSI */
373
70f56aa2
MH
374 if (lmp_ext_feat_capable(hdev))
375 events[4] |= 0x04; /* Read Remote Extended Features Complete */
376
377 if (lmp_esco_capable(hdev)) {
378 events[5] |= 0x08; /* Synchronous Connection Complete */
379 events[5] |= 0x10; /* Synchronous Connection Changed */
380 }
381
2177bab5
JH
382 if (lmp_sniffsubr_capable(hdev))
383 events[5] |= 0x20; /* Sniff Subrating */
384
385 if (lmp_pause_enc_capable(hdev))
386 events[5] |= 0x80; /* Encryption Key Refresh Complete */
387
388 if (lmp_ext_inq_capable(hdev))
389 events[5] |= 0x40; /* Extended Inquiry Result */
390
391 if (lmp_no_flush_capable(hdev))
392 events[7] |= 0x01; /* Enhanced Flush Complete */
393
394 if (lmp_lsto_capable(hdev))
395 events[6] |= 0x80; /* Link Supervision Timeout Changed */
396
397 if (lmp_ssp_capable(hdev)) {
398 events[6] |= 0x01; /* IO Capability Request */
399 events[6] |= 0x02; /* IO Capability Response */
400 events[6] |= 0x04; /* User Confirmation Request */
401 events[6] |= 0x08; /* User Passkey Request */
402 events[6] |= 0x10; /* Remote OOB Data Request */
403 events[6] |= 0x20; /* Simple Pairing Complete */
404 events[7] |= 0x04; /* User Passkey Notification */
405 events[7] |= 0x08; /* Keypress Notification */
406 events[7] |= 0x10; /* Remote Host Supported
407 * Features Notification
408 */
409 }
410
411 if (lmp_le_capable(hdev))
412 events[7] |= 0x20; /* LE Meta-Event */
413
42c6b129 414 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
2177bab5
JH
415}
416
a1d01db1 417static int hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 418{
42c6b129
JH
419 struct hci_dev *hdev = req->hdev;
420
0af801b9
JH
421 if (hdev->dev_type == HCI_AMP)
422 return amp_init2(req);
423
2177bab5 424 if (lmp_bredr_capable(hdev))
42c6b129 425 bredr_setup(req);
56f87901 426 else
a358dc11 427 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
2177bab5
JH
428
429 if (lmp_le_capable(hdev))
42c6b129 430 le_setup(req);
2177bab5 431
0f3adeae
MH
432 /* All Bluetooth 1.2 and later controllers should support the
433 * HCI command for reading the local supported commands.
434 *
435 * Unfortunately some controllers indicate Bluetooth 1.2 support,
436 * but do not have support for this command. If that is the case,
437 * the driver can quirk the behavior and skip reading the local
438 * supported commands.
3f8e2d75 439 */
0f3adeae
MH
440 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
441 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
42c6b129 442 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
443
444 if (lmp_ssp_capable(hdev)) {
57af75a8
MH
445 /* When SSP is available, then the host features page
446 * should also be available as well. However some
447 * controllers list the max_page as 0 as long as SSP
448 * has not been enabled. To achieve proper debugging
449 * output, force the minimum max_page to 1 at least.
450 */
451 hdev->max_page = 0x01;
452
d7a5a11d 453 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2177bab5 454 u8 mode = 0x01;
574ea3c7 455
42c6b129
JH
456 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
457 sizeof(mode), &mode);
2177bab5
JH
458 } else {
459 struct hci_cp_write_eir cp;
460
461 memset(hdev->eir, 0, sizeof(hdev->eir));
462 memset(&cp, 0, sizeof(cp));
463
42c6b129 464 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
465 }
466 }
467
043ec9bf
MH
468 if (lmp_inq_rssi_capable(hdev) ||
469 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
04422da9
MH
470 u8 mode;
471
472 /* If Extended Inquiry Result events are supported, then
473 * they are clearly preferred over Inquiry Result with RSSI
474 * events.
475 */
476 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
477
478 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
479 }
2177bab5
JH
480
481 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 482 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
483
484 if (lmp_ext_feat_capable(hdev)) {
485 struct hci_cp_read_local_ext_features cp;
486
487 cp.page = 0x01;
42c6b129
JH
488 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
489 sizeof(cp), &cp);
2177bab5
JH
490 }
491
d7a5a11d 492 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2177bab5 493 u8 enable = 1;
42c6b129
JH
494 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
495 &enable);
2177bab5 496 }
a1d01db1
JH
497
498 return 0;
2177bab5
JH
499}
500
42c6b129 501static void hci_setup_link_policy(struct hci_request *req)
2177bab5 502{
42c6b129 503 struct hci_dev *hdev = req->hdev;
2177bab5
JH
504 struct hci_cp_write_def_link_policy cp;
505 u16 link_policy = 0;
506
507 if (lmp_rswitch_capable(hdev))
508 link_policy |= HCI_LP_RSWITCH;
509 if (lmp_hold_capable(hdev))
510 link_policy |= HCI_LP_HOLD;
511 if (lmp_sniff_capable(hdev))
512 link_policy |= HCI_LP_SNIFF;
513 if (lmp_park_capable(hdev))
514 link_policy |= HCI_LP_PARK;
515
516 cp.policy = cpu_to_le16(link_policy);
42c6b129 517 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
518}
519
42c6b129 520static void hci_set_le_support(struct hci_request *req)
2177bab5 521{
42c6b129 522 struct hci_dev *hdev = req->hdev;
2177bab5
JH
523 struct hci_cp_write_le_host_supported cp;
524
c73eee91
JH
525 /* LE-only devices do not support explicit enablement */
526 if (!lmp_bredr_capable(hdev))
527 return;
528
2177bab5
JH
529 memset(&cp, 0, sizeof(cp));
530
d7a5a11d 531 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2177bab5 532 cp.le = 0x01;
32226e4f 533 cp.simul = 0x00;
2177bab5
JH
534 }
535
536 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
537 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
538 &cp);
2177bab5
JH
539}
540
d62e6d67
JH
/* Build and conditionally queue Set Event Mask Page 2 for Connectionless
 * Slave Broadcast roles and Authenticated Payload Timeout. The command is
 * skipped entirely when no bit would change from the all-zero default.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (changed)
		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
			    sizeof(events), events);
}
585
a1d01db1 586static int hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 587{
42c6b129 588 struct hci_dev *hdev = req->hdev;
d2c5d77f 589 u8 p;
42c6b129 590
0da71f1b
MH
591 hci_setup_event_mask(req);
592
e81be90b
JH
593 if (hdev->commands[6] & 0x20 &&
594 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
48ce62c4
MH
595 struct hci_cp_read_stored_link_key cp;
596
597 bacpy(&cp.bdaddr, BDADDR_ANY);
598 cp.read_all = 0x01;
599 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
600 }
601
2177bab5 602 if (hdev->commands[5] & 0x10)
42c6b129 603 hci_setup_link_policy(req);
2177bab5 604
417287de
MH
605 if (hdev->commands[8] & 0x01)
606 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
607
cde1a8a9
IFM
608 if (hdev->commands[18] & 0x04 &&
609 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
00bce3fb
AM
610 hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
611
417287de
MH
612 /* Some older Broadcom based Bluetooth 1.2 controllers do not
613 * support the Read Page Scan Type command. Check support for
614 * this command in the bit mask of supported commands.
615 */
616 if (hdev->commands[13] & 0x01)
617 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
618
9193c6e8
AG
619 if (lmp_le_capable(hdev)) {
620 u8 events[8];
621
622 memset(events, 0, sizeof(events));
4d6c705b
MH
623
624 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
625 events[0] |= 0x10; /* LE Long Term Key Request */
662bc2e6
AG
626
627 /* If controller supports the Connection Parameters Request
628 * Link Layer Procedure, enable the corresponding event.
629 */
630 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
631 events[0] |= 0x20; /* LE Remote Connection
632 * Parameter Request
633 */
634
a9f6068e
MH
635 /* If the controller supports the Data Length Extension
636 * feature, enable the corresponding event.
637 */
638 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
639 events[0] |= 0x40; /* LE Data Length Change */
640
ff3b8df2
MH
641 /* If the controller supports LL Privacy feature, enable
642 * the corresponding event.
643 */
644 if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
645 events[1] |= 0x02; /* LE Enhanced Connection
646 * Complete
647 */
648
4b71bba4
MH
649 /* If the controller supports Extended Scanner Filter
650 * Policies, enable the correspondig event.
651 */
652 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
653 events[1] |= 0x04; /* LE Direct Advertising
654 * Report
655 */
656
9756d33b
MH
657 /* If the controller supports Channel Selection Algorithm #2
658 * feature, enable the corresponding event.
659 */
660 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
661 events[2] |= 0x08; /* LE Channel Selection
662 * Algorithm
663 */
664
7d26f5c4
MH
665 /* If the controller supports the LE Set Scan Enable command,
666 * enable the corresponding advertising report event.
667 */
668 if (hdev->commands[26] & 0x08)
669 events[0] |= 0x02; /* LE Advertising Report */
670
671 /* If the controller supports the LE Create Connection
672 * command, enable the corresponding event.
673 */
674 if (hdev->commands[26] & 0x10)
675 events[0] |= 0x01; /* LE Connection Complete */
676
677 /* If the controller supports the LE Connection Update
678 * command, enable the corresponding event.
679 */
680 if (hdev->commands[27] & 0x04)
681 events[0] |= 0x04; /* LE Connection Update
682 * Complete
683 */
684
685 /* If the controller supports the LE Read Remote Used Features
686 * command, enable the corresponding event.
687 */
688 if (hdev->commands[27] & 0x20)
689 events[0] |= 0x08; /* LE Read Remote Used
690 * Features Complete
691 */
692
5a34bd5f
MH
693 /* If the controller supports the LE Read Local P-256
694 * Public Key command, enable the corresponding event.
695 */
696 if (hdev->commands[34] & 0x02)
697 events[0] |= 0x80; /* LE Read Local P-256
698 * Public Key Complete
699 */
700
701 /* If the controller supports the LE Generate DHKey
702 * command, enable the corresponding event.
703 */
704 if (hdev->commands[34] & 0x04)
705 events[1] |= 0x01; /* LE Generate DHKey Complete */
706
27bbca44
MH
707 /* If the controller supports the LE Set Default PHY or
708 * LE Set PHY commands, enable the corresponding event.
709 */
710 if (hdev->commands[35] & (0x20 | 0x40))
711 events[1] |= 0x08; /* LE PHY Update Complete */
712
c215e939
JK
713 /* If the controller supports LE Set Extended Scan Parameters
714 * and LE Set Extended Scan Enable commands, enable the
715 * corresponding event.
716 */
717 if (use_ext_scan(hdev))
718 events[1] |= 0x10; /* LE Extended Advertising
719 * Report
720 */
721
acf0aeae
JK
722 /* If the controller supports the LE Extended Advertising
723 * command, enable the corresponding event.
724 */
725 if (ext_adv_capable(hdev))
726 events[2] |= 0x02; /* LE Advertising Set
727 * Terminated
728 */
729
9193c6e8
AG
730 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
731 events);
732
6b49bcb4
JK
733 /* Read LE Advertising Channel TX Power */
734 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
735 /* HCI TS spec forbids mixing of legacy and extended
736 * advertising commands wherein READ_ADV_TX_POWER is
737 * also included. So do not call it if extended adv
738 * is supported otherwise controller will return
739 * COMMAND_DISALLOWED for extended commands.
740 */
15a49cca
MH
741 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
742 }
743
7c395ea5
DW
744 if (hdev->commands[38] & 0x80) {
745 /* Read LE Min/Max Tx Power*/
746 hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
747 0, NULL);
748 }
749
2ab216a7
MH
750 if (hdev->commands[26] & 0x40) {
751 /* Read LE White List Size */
752 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
753 0, NULL);
754 }
755
756 if (hdev->commands[26] & 0x80) {
757 /* Clear LE White List */
758 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
759 }
760
cfdb0c2d
AN
761 if (hdev->commands[34] & 0x40) {
762 /* Read LE Resolving List Size */
763 hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
764 0, NULL);
765 }
766
545f2596
AN
767 if (hdev->commands[34] & 0x20) {
768 /* Clear LE Resolving List */
769 hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
770 }
771
a31489d2 772 if (hdev->commands[35] & 0x04) {
b2cc2339
SN
773 __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
774
775 /* Set RPA timeout */
776 hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
777 &rpa_timeout);
778 }
779
a9f6068e
MH
780 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
781 /* Read LE Maximum Data Length */
782 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
783
784 /* Read LE Suggested Default Data Length */
785 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
786 }
787
6b49bcb4
JK
788 if (ext_adv_capable(hdev)) {
789 /* Read LE Number of Supported Advertising Sets */
790 hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
791 0, NULL);
792 }
793
42c6b129 794 hci_set_le_support(req);
9193c6e8 795 }
d2c5d77f
JH
796
797 /* Read features beyond page 1 if available */
798 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
799 struct hci_cp_read_local_ext_features cp;
800
801 cp.page = p;
802 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
803 sizeof(cp), &cp);
804 }
a1d01db1
JH
805
806 return 0;
2177bab5
JH
807}
808
a1d01db1 809static int hci_init4_req(struct hci_request *req, unsigned long opt)
5d4e7e8d
JH
810{
811 struct hci_dev *hdev = req->hdev;
812
36f260ce
MH
813 /* Some Broadcom based Bluetooth controllers do not support the
814 * Delete Stored Link Key command. They are clearly indicating its
815 * absence in the bit mask of supported commands.
816 *
bb6d6895 817 * Check the supported commands and only if the command is marked
36f260ce
MH
818 * as supported send it. If not supported assume that the controller
819 * does not have actual support for stored link keys which makes this
820 * command redundant anyway.
821 *
822 * Some controllers indicate that they support handling deleting
823 * stored link keys, but they don't. The quirk lets a driver
824 * just disable this command.
825 */
826 if (hdev->commands[6] & 0x80 &&
827 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
828 struct hci_cp_delete_stored_link_key cp;
829
830 bacpy(&cp.bdaddr, BDADDR_ANY);
831 cp.delete_all = 0x01;
832 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
833 sizeof(cp), &cp);
834 }
835
d62e6d67
JH
836 /* Set event mask page 2 if the HCI command for it is supported */
837 if (hdev->commands[22] & 0x04)
838 hci_set_event_mask_page_2(req);
839
109e3191
MH
840 /* Read local codec list if the HCI command is supported */
841 if (hdev->commands[29] & 0x20)
842 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
843
a4790360
MH
844 /* Read local pairing options if the HCI command is supported */
845 if (hdev->commands[41] & 0x08)
846 hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);
847
f4fe73ed
MH
848 /* Get MWS transport configuration if the HCI command is supported */
849 if (hdev->commands[30] & 0x08)
850 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
851
5d4e7e8d 852 /* Check for Synchronization Train support */
53b834d2 853 if (lmp_sync_train_capable(hdev))
5d4e7e8d 854 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
a6d0d690
MH
855
856 /* Enable Secure Connections if supported and configured */
d7a5a11d 857 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
574ea3c7 858 bredr_sc_enabled(hdev)) {
a6d0d690 859 u8 support = 0x01;
574ea3c7 860
a6d0d690
MH
861 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
862 sizeof(support), &support);
863 }
a1d01db1 864
00bce3fb
AM
865 /* Set erroneous data reporting if supported to the wideband speech
866 * setting value
867 */
cde1a8a9
IFM
868 if (hdev->commands[18] & 0x08 &&
869 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
00bce3fb
AM
870 bool enabled = hci_dev_test_flag(hdev,
871 HCI_WIDEBAND_SPEECH_ENABLED);
872
873 if (enabled !=
874 (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
875 struct hci_cp_write_def_err_data_reporting cp;
876
877 cp.err_data_reporting = enabled ?
878 ERR_DATA_REPORTING_ENABLED :
879 ERR_DATA_REPORTING_DISABLED;
880
881 hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
882 sizeof(cp), &cp);
883 }
884 }
885
12204875
MH
886 /* Set Suggested Default Data Length to maximum if supported */
887 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
888 struct hci_cp_le_write_def_data_len cp;
889
727ea61a
BDC
890 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
891 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
12204875
MH
892 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
893 }
894
de2ba303
MH
895 /* Set Default PHY parameters if command is supported */
896 if (hdev->commands[35] & 0x20) {
897 struct hci_cp_le_set_default_phy cp;
898
6decb5b4
JK
899 cp.all_phys = 0x00;
900 cp.tx_phys = hdev->le_tx_def_phys;
901 cp.rx_phys = hdev->le_rx_def_phys;
de2ba303
MH
902
903 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
904 }
905
a1d01db1 906 return 0;
5d4e7e8d
JH
907}
908
2177bab5
JH
/* Run the full synchronous initialization sequence for a configured
 * controller: stages 1-2 for all device types, stages 3-4 plus debugfs
 * creation only for primary (non-AMP) controllers.
 *
 * Returns 0 on success or a negative errno from the failing stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
965
/* Build the minimal init request used for unconfigured controllers:
 * an optional reset, Read Local Version, and - when the driver can
 * reprogram the address - Read BD Address. The commands are queued in
 * this exact order on @req.
 *
 * Always returns 0; @opt is unused apart from debug output.
 */
static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}
985
986static int __hci_unconf_init(struct hci_dev *hdev)
987{
988 int err;
989
cc78b44b
MH
990 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
991 return 0;
992
4ebeee2d 993 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
0ebca7d6
MH
994 if (err < 0)
995 return err;
996
f640ee98
MH
997 if (hci_dev_test_flag(hdev, HCI_SETUP))
998 hci_debugfs_create_basic(hdev);
999
0ebca7d6
MH
1000 return 0;
1001}
1002
a1d01db1 1003static int hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1004{
1005 __u8 scan = opt;
1006
42c6b129 1007 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1008
1009 /* Inquiry and Page scans */
42c6b129 1010 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
a1d01db1 1011 return 0;
1da177e4
LT
1012}
1013
a1d01db1 1014static int hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1015{
1016 __u8 auth = opt;
1017
42c6b129 1018 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1019
1020 /* Authentication */
42c6b129 1021 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
a1d01db1 1022 return 0;
1da177e4
LT
1023}
1024
a1d01db1 1025static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1026{
1027 __u8 encrypt = opt;
1028
42c6b129 1029 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1030
e4e8e37c 1031 /* Encryption */
42c6b129 1032 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
a1d01db1 1033 return 0;
1da177e4
LT
1034}
1035
a1d01db1 1036static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1037{
1038 __le16 policy = cpu_to_le16(opt);
1039
42c6b129 1040 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1041
1042 /* Default link policy */
42c6b129 1043 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
a1d01db1 1044 return 0;
e4e8e37c
MH
1045}
1046
8e87d142 1047/* Get HCI device by index.
1da177e4
LT
1048 * Device is held on return. */
1049struct hci_dev *hci_dev_get(int index)
1050{
8035ded4 1051 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1052
1053 BT_DBG("%d", index);
1054
1055 if (index < 0)
1056 return NULL;
1057
1058 read_lock(&hci_dev_list_lock);
8035ded4 1059 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1060 if (d->id == index) {
1061 hdev = hci_dev_hold(d);
1062 break;
1063 }
1064 }
1065 read_unlock(&hci_dev_list_lock);
1066 return hdev;
1067}
1da177e4
LT
1068
1069/* ---- Inquiry support ---- */
ff9ef578 1070
30dc78e1
JH
1071bool hci_discovery_active(struct hci_dev *hdev)
1072{
1073 struct discovery_state *discov = &hdev->discovery;
1074
6fbe195d 1075 switch (discov->state) {
343f935b 1076 case DISCOVERY_FINDING:
6fbe195d 1077 case DISCOVERY_RESOLVING:
30dc78e1
JH
1078 return true;
1079
6fbe195d
AG
1080 default:
1081 return false;
1082 }
30dc78e1
JH
1083}
1084
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" events. A transition to DISCOVERY_STOPPED also lets
 * background scanning resume. No-op when @state equals the current
 * state.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		/* If discovery never progressed past STARTING, no
		 * "discovering started" event was sent, so suppress the
		 * matching "stopped" notification as well.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
1114
1f9b9a5d 1115void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1116{
30883512 1117 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1118 struct inquiry_entry *p, *n;
1da177e4 1119
561aafbc
JH
1120 list_for_each_entry_safe(p, n, &cache->all, all) {
1121 list_del(&p->all);
b57c1a56 1122 kfree(p);
1da177e4 1123 }
561aafbc
JH
1124
1125 INIT_LIST_HEAD(&cache->unknown);
1126 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1127}
1128
a8c5fb1a
GP
1129struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1130 bdaddr_t *bdaddr)
1da177e4 1131{
30883512 1132 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1133 struct inquiry_entry *e;
1134
6ed93dc6 1135 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1136
561aafbc
JH
1137 list_for_each_entry(e, &cache->all, all) {
1138 if (!bacmp(&e->data.bdaddr, bdaddr))
1139 return e;
1140 }
1141
1142 return NULL;
1143}
1144
1145struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1146 bdaddr_t *bdaddr)
561aafbc 1147{
30883512 1148 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1149 struct inquiry_entry *e;
1150
6ed93dc6 1151 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1152
1153 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1154 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1155 return e;
1156 }
1157
1158 return NULL;
1da177e4
LT
1159}
1160
30dc78e1 1161struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1162 bdaddr_t *bdaddr,
1163 int state)
30dc78e1
JH
1164{
1165 struct discovery_state *cache = &hdev->discovery;
1166 struct inquiry_entry *e;
1167
6ed93dc6 1168 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1169
1170 list_for_each_entry(e, &cache->resolve, list) {
1171 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1172 return e;
1173 if (!bacmp(&e->data.bdaddr, bdaddr))
1174 return e;
1175 }
1176
1177 return NULL;
1178}
1179
/* Re-insert @ie into the resolve list at its sorted position after its
 * RSSI changed. The list is kept ordered so that entries with smaller
 * |RSSI| (stronger signal) come first; entries already in NAME_PENDING
 * state are never displaced, so an in-flight name request keeps its
 * place at the head.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until the first non-pending entry with a weaker or equal
	 * signal; @pos trails behind as the insertion point.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1198
/* Insert or refresh an inquiry cache entry for the device described by
 * @data. @name_known indicates whether the remote name is already
 * available to the caller.
 *
 * Returns MGMT_DEV_FOUND_* flags for the device-found event:
 * MGMT_DEV_FOUND_LEGACY_PAIRING when neither the new nor the cached
 * data indicates SSP support, and MGMT_DEV_FOUND_CONFIRM_NAME when the
 * name still needs to be resolved (or the entry could not be
 * allocated).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry response invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* An RSSI change re-sorts the entry in the resolve list
		 * so that name resolution prefers stronger signals.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Caller learned the name: drop the entry from the unknown or
	 * resolve sub-list it was linked on (not for NAME_PENDING, where
	 * a request is still in flight).
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1260
1261static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1262{
30883512 1263 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1264 struct inquiry_info *info = (struct inquiry_info *) buf;
1265 struct inquiry_entry *e;
1266 int copied = 0;
1267
561aafbc 1268 list_for_each_entry(e, &cache->all, all) {
1da177e4 1269 struct inquiry_data *data = &e->data;
b57c1a56
JH
1270
1271 if (copied >= num)
1272 break;
1273
1da177e4
LT
1274 bacpy(&info->bdaddr, &data->bdaddr);
1275 info->pscan_rep_mode = data->pscan_rep_mode;
1276 info->pscan_period_mode = data->pscan_period_mode;
1277 info->pscan_mode = data->pscan_mode;
1278 memcpy(info->dev_class, data->dev_class, 3);
1279 info->clock_offset = data->clock_offset;
b57c1a56 1280
1da177e4 1281 info++;
b57c1a56 1282 copied++;
1da177e4
LT
1283 }
1284
1285 BT_DBG("cache %p, copied %d", cache, copied);
1286 return copied;
1287}
1288
a1d01db1 1289static int hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1290{
1291 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 1292 struct hci_dev *hdev = req->hdev;
1da177e4
LT
1293 struct hci_cp_inquiry cp;
1294
1295 BT_DBG("%s", hdev->name);
1296
1297 if (test_bit(HCI_INQUIRY, &hdev->flags))
a1d01db1 1298 return 0;
1da177e4
LT
1299
1300 /* Start Inquiry */
1301 memcpy(&cp.lap, &ir->lap, 3);
1302 cp.length = ir->length;
1303 cp.num_rsp = ir->num_rsp;
42c6b129 1304 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
a1d01db1
JH
1305
1306 return 0;
1da177e4
LT
1307}
1308
1309int hci_inquiry(void __user *arg)
1310{
1311 __u8 __user *ptr = arg;
1312 struct hci_inquiry_req ir;
1313 struct hci_dev *hdev;
1314 int err = 0, do_inquiry = 0, max_rsp;
1315 long timeo;
1316 __u8 *buf;
1317
1318 if (copy_from_user(&ir, ptr, sizeof(ir)))
1319 return -EFAULT;
1320
5a08ecce
AE
1321 hdev = hci_dev_get(ir.dev_id);
1322 if (!hdev)
1da177e4
LT
1323 return -ENODEV;
1324
d7a5a11d 1325 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1326 err = -EBUSY;
1327 goto done;
1328 }
1329
d7a5a11d 1330 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1331 err = -EOPNOTSUPP;
1332 goto done;
1333 }
1334
ca8bee5d 1335 if (hdev->dev_type != HCI_PRIMARY) {
5b69bef5
MH
1336 err = -EOPNOTSUPP;
1337 goto done;
1338 }
1339
d7a5a11d 1340 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
1341 err = -EOPNOTSUPP;
1342 goto done;
1343 }
1344
09fd0de5 1345 hci_dev_lock(hdev);
8e87d142 1346 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1347 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1348 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1349 do_inquiry = 1;
1350 }
09fd0de5 1351 hci_dev_unlock(hdev);
1da177e4 1352
04837f64 1353 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1354
1355 if (do_inquiry) {
01178cd4 1356 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
4ebeee2d 1357 timeo, NULL);
70f23020
AE
1358 if (err < 0)
1359 goto done;
3e13fa1e
AG
1360
1361 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1362 * cleared). If it is interrupted by a signal, return -EINTR.
1363 */
74316201 1364 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
3e13fa1e
AG
1365 TASK_INTERRUPTIBLE))
1366 return -EINTR;
70f23020 1367 }
1da177e4 1368
8fc9ced3
GP
1369 /* for unlimited number of responses we will use buffer with
1370 * 255 entries
1371 */
1da177e4
LT
1372 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1373
1374 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1375 * copy it to the user space.
1376 */
6da2ec56 1377 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
70f23020 1378 if (!buf) {
1da177e4
LT
1379 err = -ENOMEM;
1380 goto done;
1381 }
1382
09fd0de5 1383 hci_dev_lock(hdev);
1da177e4 1384 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1385 hci_dev_unlock(hdev);
1da177e4
LT
1386
1387 BT_DBG("num_rsp %d", ir.num_rsp);
1388
1389 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1390 ptr += sizeof(ir);
1391 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1392 ir.num_rsp))
1da177e4 1393 err = -EFAULT;
8e87d142 1394 } else
1da177e4
LT
1395 err = -EFAULT;
1396
1397 kfree(buf);
1398
1399done:
1400 hci_dev_put(hdev);
1401 return err;
1402}
1403
7a0e5b15
MK
1404/**
1405 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1406 * (BD_ADDR) for a HCI device from
1407 * a firmware node property.
1408 * @hdev: The HCI device
1409 *
1410 * Search the firmware node for 'local-bd-address'.
1411 *
1412 * All-zero BD addresses are rejected, because those could be properties
1413 * that exist in the firmware tables, but were not updated by the firmware. For
1414 * example, the DTS could define 'local-bd-address', with zero BD addresses.
1415 */
1416static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1417{
1418 struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1419 bdaddr_t ba;
1420 int ret;
1421
1422 ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1423 (u8 *)&ba, sizeof(ba));
1424 if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1425 return;
1426
1427 bacpy(&hdev->public_addr, &ba);
1428}
1429
/* Power on a controller: validate preconditions, open the transport,
 * run driver setup (first time only), program the BD_ADDR when
 * required, execute the HCI init stages, and announce the device as up.
 * On any failure after the transport was opened, all work is flushed,
 * queues are purged and the transport is closed again.
 *
 * Called with a reference to @hdev held; serialized via the request
 * sync lock. Returns 0 on success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport; from here on failure must close it again */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
		bool invalid_bdaddr;

		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set the quirk to mark the
		 * BD_ADDR invalid before creating the HCI device or in
		 * its setup callback.
		 */
		invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
					  &hdev->quirks);

		if (ret)
			goto setup_failed;

		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
				hci_dev_get_bd_addr_from_property(hdev);

			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
			    hdev->set_bdaddr) {
				ret = hdev->set_bdaddr(hdev,
						       &hdev->public_addr);

				/* If setting of the BD_ADDR from the device
				 * property succeeds, then treat the address
				 * as valid even if the invalid BD_ADDR
				 * quirk indicates otherwise.
				 */
				if (!ret)
					invalid_bdaddr = false;
			}
		}

setup_failed:
		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * For the invalid BD_ADDR quirk it is possible that
		 * it becomes a valid address if the bootloader does
		 * provide it (see above).
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    invalid_bdaddr)
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		/* Full init only for configured, kernel-managed devices;
		 * user channel gets a raw controller.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	msft_do_open(hdev);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		/* Keep only the raw flag across a failed power on */
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}
1634
/* ---- HCI ioctl helpers ---- */

/* Power on the controller with index @dev on behalf of the legacy
 * ioctl interface. Validates that unconfigured devices are not brought
 * up this way, cancels pending auto power-off, waits for any setup in
 * progress to finish and then calls hci_dev_do_open().
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1691
d7347f3c
JH
1692/* This function requires the caller holds hdev->lock */
1693static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1694{
1695 struct hci_conn_params *p;
1696
f161dd41
JH
1697 list_for_each_entry(p, &hdev->le_conn_params, list) {
1698 if (p->conn) {
1699 hci_conn_drop(p->conn);
f8aaf9b6 1700 hci_conn_put(p->conn);
f161dd41
JH
1701 p->conn = NULL;
1702 }
d7347f3c 1703 list_del_init(&p->action);
f161dd41 1704 }
d7347f3c
JH
1705
1706 BT_DBG("All LE pending actions cleared");
1707}
1708
/* Power off a controller: run the vendor shutdown hook, cancel pending
 * work, flush all queues and work items, tear down connections and the
 * inquiry cache, optionally reset the controller, and close the
 * transport. Idempotent: returns 0 immediately when the device is not
 * up. Drops one reference on @hdev (taken when the device went up).
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		struct adv_info *adv_instance;

		/* Stop RPA rotation for the device and for every
		 * advertising instance before tearing down.
		 */
		cancel_delayed_work_sync(&hdev->rpa_expired);

		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	msft_do_close(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* Wake a suspend path waiting for the power-down to complete */
	if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
		wake_up(&hdev->suspend_wait_q);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1839
1840int hci_dev_close(__u16 dev)
1841{
1842 struct hci_dev *hdev;
1843 int err;
1844
70f23020
AE
1845 hdev = hci_dev_get(dev);
1846 if (!hdev)
1da177e4 1847 return -ENODEV;
8ee56540 1848
d7a5a11d 1849 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1850 err = -EBUSY;
1851 goto done;
1852 }
1853
a69d8927 1854 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
8ee56540
MH
1855 cancel_delayed_work(&hdev->power_off);
1856
1da177e4 1857 err = hci_dev_do_close(hdev);
8ee56540 1858
0736cfa8 1859done:
1da177e4
LT
1860 hci_dev_put(hdev);
1861 return err;
1862}
1863
5c912495 1864static int hci_dev_do_reset(struct hci_dev *hdev)
1da177e4 1865{
5c912495 1866 int ret;
1da177e4 1867
5c912495 1868 BT_DBG("%s %p", hdev->name, hdev);
1da177e4 1869
b504430c 1870 hci_req_sync_lock(hdev);
1da177e4 1871
1da177e4
LT
1872 /* Drop queues */
1873 skb_queue_purge(&hdev->rx_q);
1874 skb_queue_purge(&hdev->cmd_q);
1875
76727c02
JH
1876 /* Avoid potential lockdep warnings from the *_flush() calls by
1877 * ensuring the workqueue is empty up front.
1878 */
1879 drain_workqueue(hdev->workqueue);
1880
09fd0de5 1881 hci_dev_lock(hdev);
1f9b9a5d 1882 hci_inquiry_cache_flush(hdev);
1da177e4 1883 hci_conn_hash_flush(hdev);
09fd0de5 1884 hci_dev_unlock(hdev);
1da177e4
LT
1885
1886 if (hdev->flush)
1887 hdev->flush(hdev);
1888
8e87d142 1889 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 1890 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 1891
4ebeee2d 1892 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1da177e4 1893
b504430c 1894 hci_req_sync_unlock(hdev);
1da177e4
LT
1895 return ret;
1896}
1897
5c912495
MH
1898int hci_dev_reset(__u16 dev)
1899{
1900 struct hci_dev *hdev;
1901 int err;
1902
1903 hdev = hci_dev_get(dev);
1904 if (!hdev)
1905 return -ENODEV;
1906
1907 if (!test_bit(HCI_UP, &hdev->flags)) {
1908 err = -ENETDOWN;
1909 goto done;
1910 }
1911
d7a5a11d 1912 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5c912495
MH
1913 err = -EBUSY;
1914 goto done;
1915 }
1916
d7a5a11d 1917 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5c912495
MH
1918 err = -EOPNOTSUPP;
1919 goto done;
1920 }
1921
1922 err = hci_dev_do_reset(hdev);
1923
1924done:
1925 hci_dev_put(hdev);
1926 return err;
1927}
1928
1da177e4
LT
1929int hci_dev_reset_stat(__u16 dev)
1930{
1931 struct hci_dev *hdev;
1932 int ret = 0;
1933
70f23020
AE
1934 hdev = hci_dev_get(dev);
1935 if (!hdev)
1da177e4
LT
1936 return -ENODEV;
1937
d7a5a11d 1938 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1939 ret = -EBUSY;
1940 goto done;
1941 }
1942
d7a5a11d 1943 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1944 ret = -EOPNOTSUPP;
1945 goto done;
1946 }
1947
1da177e4
LT
1948 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1949
0736cfa8 1950done:
1da177e4 1951 hci_dev_put(hdev);
1da177e4
LT
1952 return ret;
1953}
1954
/* Mirror a Write Scan Enable change made via the legacy ioctl path
 * into the HCI_CONNECTABLE/HCI_DISCOVERABLE device flags, and notify
 * the management interface when either setting actually changed.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		/* Limited discoverable implies discoverable; clear both */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	/* Without the management interface there is nobody to notify */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}
1990
1da177e4
LT
1991int hci_dev_cmd(unsigned int cmd, void __user *arg)
1992{
1993 struct hci_dev *hdev;
1994 struct hci_dev_req dr;
1995 int err = 0;
1996
1997 if (copy_from_user(&dr, arg, sizeof(dr)))
1998 return -EFAULT;
1999
70f23020
AE
2000 hdev = hci_dev_get(dr.dev_id);
2001 if (!hdev)
1da177e4
LT
2002 return -ENODEV;
2003
d7a5a11d 2004 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
2005 err = -EBUSY;
2006 goto done;
2007 }
2008
d7a5a11d 2009 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
2010 err = -EOPNOTSUPP;
2011 goto done;
2012 }
2013
ca8bee5d 2014 if (hdev->dev_type != HCI_PRIMARY) {
5b69bef5
MH
2015 err = -EOPNOTSUPP;
2016 goto done;
2017 }
2018
d7a5a11d 2019 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
2020 err = -EOPNOTSUPP;
2021 goto done;
2022 }
2023
1da177e4
LT
2024 switch (cmd) {
2025 case HCISETAUTH:
01178cd4 2026 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
4ebeee2d 2027 HCI_INIT_TIMEOUT, NULL);
1da177e4
LT
2028 break;
2029
2030 case HCISETENCRYPT:
2031 if (!lmp_encrypt_capable(hdev)) {
2032 err = -EOPNOTSUPP;
2033 break;
2034 }
2035
2036 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2037 /* Auth must be enabled first */
01178cd4 2038 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
4ebeee2d 2039 HCI_INIT_TIMEOUT, NULL);
1da177e4
LT
2040 if (err)
2041 break;
2042 }
2043
01178cd4 2044 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
4ebeee2d 2045 HCI_INIT_TIMEOUT, NULL);
1da177e4
LT
2046 break;
2047
2048 case HCISETSCAN:
01178cd4 2049 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
4ebeee2d 2050 HCI_INIT_TIMEOUT, NULL);
91a668b0 2051
bc6d2d04
JH
2052 /* Ensure that the connectable and discoverable states
2053 * get correctly modified as this was a non-mgmt change.
91a668b0 2054 */
123abc08
JH
2055 if (!err)
2056 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
2057 break;
2058
1da177e4 2059 case HCISETLINKPOL:
01178cd4 2060 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
4ebeee2d 2061 HCI_INIT_TIMEOUT, NULL);
1da177e4
LT
2062 break;
2063
2064 case HCISETLINKMODE:
e4e8e37c
MH
2065 hdev->link_mode = ((__u16) dr.dev_opt) &
2066 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2067 break;
2068
2069 case HCISETPTYPE:
b7c23df8
JK
2070 if (hdev->pkt_type == (__u16) dr.dev_opt)
2071 break;
2072
e4e8e37c 2073 hdev->pkt_type = (__u16) dr.dev_opt;
b7c23df8 2074 mgmt_phy_configuration_changed(hdev, NULL);
1da177e4
LT
2075 break;
2076
2077 case HCISETACLMTU:
e4e8e37c
MH
2078 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2079 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2080 break;
2081
2082 case HCISETSCOMTU:
e4e8e37c
MH
2083 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2084 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2085 break;
2086
2087 default:
2088 err = -EINVAL;
2089 break;
2090 }
e4e8e37c 2091
0736cfa8 2092done:
1da177e4
LT
2093 hci_dev_put(hdev);
2094 return err;
2095}
2096
/* HCIGETDEVLIST: copy up to dev_num (id, flags) pairs for registered
 * controllers to userspace.
 *
 * @arg points at a struct hci_dev_list_req whose first __u16 is the
 * caller-provided capacity; the same buffer receives the result.
 * Returns 0, -EFAULT, -EINVAL (bad count) or -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kzalloc below stays bounded. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	/* Only copy back the entries actually filled in. */
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2146
/* HCIGETDEVINFO: fill a struct hci_dev_info for the controller named in
 * di.dev_id and copy it back to userspace.
 *
 * LE-only controllers report their LE buffer settings in the ACL
 * fields and zero SCO values. Returns 0, -EFAULT or -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble = bus type, next two bits = device type. */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only: reuse the ACL fields for LE buffer info. */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2199
2200/* ---- Interface to HCI drivers ---- */
2201
611b30f7
MH
/* rfkill ->set_block callback: power the controller down when the
 * switch blocks radio, and record the state in HCI_RFKILLED.
 *
 * A device claimed by a user channel cannot be force-closed here, so
 * -EBUSY is returned. During SETUP/CONFIG the close is deferred;
 * hci_power_on() re-checks HCI_RFKILLED afterwards.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}
2222
/* rfkill operations: only the block/unblock hook is needed. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2226
ab81cbf9
JH
/* Deferred power-on work (hdev->power_on).
 *
 * If the device is already up under mgmt control with auto-off armed,
 * this is a "keep powered" request: cancel the pending power-off and
 * re-run the power-on request sequence. Otherwise open the device and
 * then re-validate conditions (rfkill, unconfigured, missing address)
 * that were deliberately ignored during setup, turning the device back
 * off if any still holds.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2298
/* Deferred power-off work (hdev->power_off), e.g. fired by the
 * auto-off timeout armed in hci_power_on().
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2308
c7741d16
MH
/* Recover from a controller hardware error: give the driver a chance
 * to handle it (->hw_error), then bounce the device with a close/open
 * cycle. If the close itself fails, the reopen is skipped.
 */
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}
2325
35f7498a 2326void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2327{
4821002c 2328 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2329
4821002c
JH
2330 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2331 list_del(&uuid->list);
2aeb9a1a
JH
2332 kfree(uuid);
2333 }
2aeb9a1a
JH
2334}
2335
35f7498a 2336void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1 2337{
0378b597 2338 struct link_key *key;
55ed8ca1 2339
d7d41682 2340 list_for_each_entry(key, &hdev->link_keys, list) {
0378b597
JH
2341 list_del_rcu(&key->list);
2342 kfree_rcu(key, rcu);
55ed8ca1 2343 }
55ed8ca1
JH
2344}
2345
35f7498a 2346void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 2347{
970d0f1b 2348 struct smp_ltk *k;
b899efaf 2349
d7d41682 2350 list_for_each_entry(k, &hdev->long_term_keys, list) {
970d0f1b
JH
2351 list_del_rcu(&k->list);
2352 kfree_rcu(k, rcu);
b899efaf 2353 }
b899efaf
VCG
2354}
2355
970c4e46
JH
/* Drop all stored identity resolving keys; RCU-deferred free as in
 * hci_link_keys_clear().
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2365
600a8749
AM
/* Drop the list of administratively blocked keys; RCU-deferred free
 * as in hci_link_keys_clear().
 */
void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b;

	list_for_each_entry(b, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}
2375
/* Return true if the 16-byte key @val of kind @type (link key, LTK or
 * IRK) is on the device's blocked-key list. Lockless: walks the list
 * under rcu_read_lock().
 */
bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}
2392
55ed8ca1
JH
/* Look up the stored BR/EDR link key for @bdaddr.
 *
 * Returns NULL when no key exists or when the key is on the blocked
 * list. NOTE(review): the returned pointer is dereferenced after
 * rcu_read_unlock(); this presumably relies on callers holding
 * hdev->lock so the entry cannot be freed — confirm against callers.
 */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2418
745c0ce3 2419static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2420 u8 key_type, u8 old_key_type)
d25e28ab
JH
2421{
2422 /* Legacy key */
2423 if (key_type < 0x03)
745c0ce3 2424 return true;
d25e28ab
JH
2425
2426 /* Debug keys are insecure so don't store them persistently */
2427 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2428 return false;
d25e28ab
JH
2429
2430 /* Changed combination key and there's no previous one */
2431 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2432 return false;
d25e28ab
JH
2433
2434 /* Security mode 3 case */
2435 if (!conn)
745c0ce3 2436 return true;
d25e28ab 2437
e3befab9
JH
2438 /* BR/EDR key derived using SC from an LE link */
2439 if (conn->type == LE_LINK)
2440 return true;
2441
d25e28ab
JH
2442 /* Neither local nor remote side had no-bonding as requirement */
2443 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2444 return true;
d25e28ab
JH
2445
2446 /* Local side had dedicated bonding as requirement */
2447 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2448 return true;
d25e28ab
JH
2449
2450 /* Remote side had dedicated bonding as requirement */
2451 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2452 return true;
d25e28ab
JH
2453
2454 /* If none of the above criteria match, then don't store the key
2455 * persistently */
745c0ce3 2456 return false;
d25e28ab
JH
2457}
2458
e804d25d 2459static u8 ltk_role(u8 type)
98a0b845 2460{
e804d25d
JH
2461 if (type == SMP_LTK)
2462 return HCI_ROLE_MASTER;
98a0b845 2463
e804d25d 2464 return HCI_ROLE_SLAVE;
98a0b845
JH
2465}
2466
/* Look up the LTK for @bdaddr/@addr_type usable in @role.
 *
 * Secure Connections LTKs work for either role; legacy LTKs must match
 * the requested role. Blocked keys yield NULL. NOTE(review): as in
 * hci_find_link_key(), the entry is used after rcu_read_unlock(),
 * presumably protected by hdev->lock in all callers — confirm.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
75d262c2 2495
970c4e46
JH
/* Resolve a Resolvable Private Address to its IRK.
 *
 * First pass matches the cached RPA; the second pass cryptographically
 * tests each IRK via smp_irk_matches() and caches the RPA on a hit.
 * Blocked IRKs are filtered out before returning.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
2529
/* Look up the IRK stored for an identity address.
 *
 * Random addresses must be static (top two bits 0b11) to qualify as an
 * identity address. Blocked IRKs are filtered out before returning.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:

	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
2562
567fa2aa 2563struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
2564 bdaddr_t *bdaddr, u8 *val, u8 type,
2565 u8 pin_len, bool *persistent)
55ed8ca1
JH
2566{
2567 struct link_key *key, *old_key;
745c0ce3 2568 u8 old_key_type;
55ed8ca1
JH
2569
2570 old_key = hci_find_link_key(hdev, bdaddr);
2571 if (old_key) {
2572 old_key_type = old_key->type;
2573 key = old_key;
2574 } else {
12adcf3a 2575 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2576 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 2577 if (!key)
567fa2aa 2578 return NULL;
0378b597 2579 list_add_rcu(&key->list, &hdev->link_keys);
55ed8ca1
JH
2580 }
2581
6ed93dc6 2582 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2583
d25e28ab
JH
2584 /* Some buggy controller combinations generate a changed
2585 * combination key for legacy pairing even when there's no
2586 * previous key */
2587 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2588 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2589 type = HCI_LK_COMBINATION;
655fe6ec
JH
2590 if (conn)
2591 conn->key_type = type;
2592 }
d25e28ab 2593
55ed8ca1 2594 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2595 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2596 key->pin_len = pin_len;
2597
b6020ba0 2598 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2599 key->type = old_key_type;
4748fed2
JH
2600 else
2601 key->type = type;
2602
7652ff6a
JH
2603 if (persistent)
2604 *persistent = hci_persistent_key(hdev, conn, type,
2605 old_key_type);
4df378a1 2606
567fa2aa 2607 return key;
55ed8ca1
JH
2608}
2609
ca9142b8 2610struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 2611 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 2612 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 2613{
c9839a11 2614 struct smp_ltk *key, *old_key;
e804d25d 2615 u8 role = ltk_role(type);
75d262c2 2616
f3a73d97 2617 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
c9839a11 2618 if (old_key)
75d262c2 2619 key = old_key;
c9839a11 2620 else {
0a14ab41 2621 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2622 if (!key)
ca9142b8 2623 return NULL;
970d0f1b 2624 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2625 }
2626
75d262c2 2627 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2628 key->bdaddr_type = addr_type;
2629 memcpy(key->val, tk, sizeof(key->val));
2630 key->authenticated = authenticated;
2631 key->ediv = ediv;
fe39c7b2 2632 key->rand = rand;
c9839a11
VCG
2633 key->enc_size = enc_size;
2634 key->type = type;
75d262c2 2635
ca9142b8 2636 return key;
75d262c2
VCG
2637}
2638
ca9142b8
JH
/* Store (or refresh) the IRK for identity address @bdaddr/@addr_type,
 * caching the latest RPA alongside it. Returns the entry or NULL on
 * allocation failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
2661
55ed8ca1
JH
/* Delete the stored BR/EDR link key for @bdaddr.
 * Returns 0 on success or -ENOENT when no key is stored.
 */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}
2677
e0b2b27e 2678int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 2679{
970d0f1b 2680 struct smp_ltk *k;
c51ffa0b 2681 int removed = 0;
b899efaf 2682
970d0f1b 2683 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 2684 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2685 continue;
2686
6ed93dc6 2687 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 2688
970d0f1b
JH
2689 list_del_rcu(&k->list);
2690 kfree_rcu(k, rcu);
c51ffa0b 2691 removed++;
b899efaf
VCG
2692 }
2693
c51ffa0b 2694 return removed ? 0 : -ENOENT;
b899efaf
VCG
2695}
2696
a7ec7338
JH
/* Delete all IRKs stored for @bdaddr/@addr_type. Safe against the
 * concurrent-iteration concern for the same reasons as
 * hci_remove_ltk() (RCU-deferred free, ->next left intact).
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2711
55e76b38
JH
/* Return true if a bond exists with @bdaddr of mgmt address @type.
 *
 * BR/EDR bonds are detected via a stored link key; LE bonds via a
 * stored LTK, after translating the address through any matching IRK
 * to the peer's identity address.
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* Keys are stored against the identity address, so resolve
	 * first if an IRK exists for this peer.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
2747
6bd32326 2748/* HCI command timer function */
65cc2b49 2749static void hci_cmd_timeout(struct work_struct *work)
6bd32326 2750{
65cc2b49
MH
2751 struct hci_dev *hdev = container_of(work, struct hci_dev,
2752 cmd_timer.work);
6bd32326 2753
bda4f23a
AE
2754 if (hdev->sent_cmd) {
2755 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2756 u16 opcode = __le16_to_cpu(sent->opcode);
2757
2064ee33 2758 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
bda4f23a 2759 } else {
2064ee33 2760 bt_dev_err(hdev, "command tx timeout");
bda4f23a
AE
2761 }
2762
e2bef384
RJ
2763 if (hdev->cmd_timeout)
2764 hdev->cmd_timeout(hdev);
2765
6bd32326 2766 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2767 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2768}
2769
2763eda6 2770struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 2771 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
2772{
2773 struct oob_data *data;
2774
6928a924
JH
2775 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2776 if (bacmp(bdaddr, &data->bdaddr) != 0)
2777 continue;
2778 if (data->bdaddr_type != bdaddr_type)
2779 continue;
2780 return data;
2781 }
2763eda6
SJ
2782
2783 return NULL;
2784}
2785
6928a924
JH
/* Delete the remote OOB data stored for @bdaddr/@bdaddr_type.
 * Returns 0 on success or -ENOENT when nothing is stored.
 */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}
2802
35f7498a 2803void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2804{
2805 struct oob_data *data, *n;
2806
2807 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2808 list_del(&data->list);
2809 kfree(data);
2810 }
2763eda6
SJ
2811}
2812
0798872e 2813int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
6928a924 2814 u8 bdaddr_type, u8 *hash192, u8 *rand192,
81328d5c 2815 u8 *hash256, u8 *rand256)
2763eda6
SJ
2816{
2817 struct oob_data *data;
2818
6928a924 2819 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6 2820 if (!data) {
0a14ab41 2821 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
2822 if (!data)
2823 return -ENOMEM;
2824
2825 bacpy(&data->bdaddr, bdaddr);
6928a924 2826 data->bdaddr_type = bdaddr_type;
2763eda6
SJ
2827 list_add(&data->list, &hdev->remote_oob_data);
2828 }
2829
81328d5c
JH
2830 if (hash192 && rand192) {
2831 memcpy(data->hash192, hash192, sizeof(data->hash192));
2832 memcpy(data->rand192, rand192, sizeof(data->rand192));
f7697b16
MH
2833 if (hash256 && rand256)
2834 data->present = 0x03;
81328d5c
JH
2835 } else {
2836 memset(data->hash192, 0, sizeof(data->hash192));
2837 memset(data->rand192, 0, sizeof(data->rand192));
f7697b16
MH
2838 if (hash256 && rand256)
2839 data->present = 0x02;
2840 else
2841 data->present = 0x00;
0798872e
MH
2842 }
2843
81328d5c
JH
2844 if (hash256 && rand256) {
2845 memcpy(data->hash256, hash256, sizeof(data->hash256));
2846 memcpy(data->rand256, rand256, sizeof(data->rand256));
2847 } else {
2848 memset(data->hash256, 0, sizeof(data->hash256));
2849 memset(data->rand256, 0, sizeof(data->rand256));
f7697b16
MH
2850 if (hash192 && rand192)
2851 data->present = 0x01;
81328d5c 2852 }
0798872e 2853
6ed93dc6 2854 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2855
2856 return 0;
2857}
2858
d2609b34
FG
2859/* This function requires the caller holds hdev->lock */
2860struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2861{
2862 struct adv_info *adv_instance;
2863
2864 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2865 if (adv_instance->instance == instance)
2866 return adv_instance;
2867 }
2868
2869 return NULL;
2870}
2871
/* This function requires the caller holds hdev->lock.
 *
 * Return the advertising instance following @instance in round-robin
 * order, wrapping from the last entry back to the first. NULL when
 * @instance does not exist.
 */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}
2888
2889/* This function requires the caller holds hdev->lock */
2890int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2891{
2892 struct adv_info *adv_instance;
2893
2894 adv_instance = hci_find_adv_instance(hdev, instance);
2895 if (!adv_instance)
2896 return -ENOENT;
2897
2898 BT_DBG("%s removing %dMR", hdev->name, instance);
2899
cab054ab
JH
2900 if (hdev->cur_adv_instance == instance) {
2901 if (hdev->adv_instance_timeout) {
2902 cancel_delayed_work(&hdev->adv_instance_expire);
2903 hdev->adv_instance_timeout = 0;
2904 }
2905 hdev->cur_adv_instance = 0x00;
5d900e46
FG
2906 }
2907
a73c046a
JK
2908 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2909
d2609b34
FG
2910 list_del(&adv_instance->list);
2911 kfree(adv_instance);
2912
2913 hdev->adv_instance_cnt--;
2914
2915 return 0;
2916}
2917
a73c046a
JK
2918void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2919{
2920 struct adv_info *adv_instance, *n;
2921
2922 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2923 adv_instance->rpa_expired = rpa_expired;
2924}
2925
d2609b34
FG
/* This function requires the caller holds hdev->lock.
 *
 * Tear down all advertising instances: stop the shared expiry timer,
 * synchronously cancel each instance's RPA work, free every entry and
 * reset the counters.
 */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}
2945
a73c046a
JK
2946static void adv_instance_rpa_expired(struct work_struct *work)
2947{
2948 struct adv_info *adv_instance = container_of(work, struct adv_info,
2949 rpa_expired_cb.work);
2950
2951 BT_DBG("");
2952
2953 adv_instance->rpa_expired = true;
2954}
2955
d2609b34
FG
2956/* This function requires the caller holds hdev->lock */
2957int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2958 u16 adv_data_len, u8 *adv_data,
2959 u16 scan_rsp_len, u8 *scan_rsp_data,
9bf9f4b6
DW
2960 u16 timeout, u16 duration, s8 tx_power,
2961 u32 min_interval, u32 max_interval)
d2609b34
FG
2962{
2963 struct adv_info *adv_instance;
2964
2965 adv_instance = hci_find_adv_instance(hdev, instance);
2966 if (adv_instance) {
2967 memset(adv_instance->adv_data, 0,
2968 sizeof(adv_instance->adv_data));
2969 memset(adv_instance->scan_rsp_data, 0,
2970 sizeof(adv_instance->scan_rsp_data));
2971 } else {
1d0fac2c 2972 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
87597482 2973 instance < 1 || instance > hdev->le_num_of_adv_sets)
d2609b34
FG
2974 return -EOVERFLOW;
2975
39ecfad6 2976 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
d2609b34
FG
2977 if (!adv_instance)
2978 return -ENOMEM;
2979
fffd38bc 2980 adv_instance->pending = true;
d2609b34
FG
2981 adv_instance->instance = instance;
2982 list_add(&adv_instance->list, &hdev->adv_instances);
2983 hdev->adv_instance_cnt++;
2984 }
2985
2986 adv_instance->flags = flags;
2987 adv_instance->adv_data_len = adv_data_len;
2988 adv_instance->scan_rsp_len = scan_rsp_len;
9bf9f4b6
DW
2989 adv_instance->min_interval = min_interval;
2990 adv_instance->max_interval = max_interval;
2991 adv_instance->tx_power = tx_power;
d2609b34
FG
2992
2993 if (adv_data_len)
2994 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2995
2996 if (scan_rsp_len)
2997 memcpy(adv_instance->scan_rsp_data,
2998 scan_rsp_data, scan_rsp_len);
2999
3000 adv_instance->timeout = timeout;
5d900e46 3001 adv_instance->remaining_time = timeout;
d2609b34
FG
3002
3003 if (duration == 0)
10873f99 3004 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
d2609b34
FG
3005 else
3006 adv_instance->duration = duration;
3007
a73c046a
JK
3008 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3009 adv_instance_rpa_expired);
3010
d2609b34
FG
3011 BT_DBG("%s for %dMR", hdev->name, instance);
3012
3013 return 0;
3014}
3015
31aab5c2
DW
3016/* This function requires the caller holds hdev->lock */
3017int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3018 u16 adv_data_len, u8 *adv_data,
3019 u16 scan_rsp_len, u8 *scan_rsp_data)
3020{
3021 struct adv_info *adv_instance;
3022
3023 adv_instance = hci_find_adv_instance(hdev, instance);
3024
3025 /* If advertisement doesn't exist, we can't modify its data */
3026 if (!adv_instance)
3027 return -ENOENT;
3028
3029 if (adv_data_len) {
3030 memset(adv_instance->adv_data, 0,
3031 sizeof(adv_instance->adv_data));
3032 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3033 adv_instance->adv_data_len = adv_data_len;
3034 }
3035
3036 if (scan_rsp_len) {
3037 memset(adv_instance->scan_rsp_data, 0,
3038 sizeof(adv_instance->scan_rsp_data));
3039 memcpy(adv_instance->scan_rsp_data,
3040 scan_rsp_data, scan_rsp_len);
3041 adv_instance->scan_rsp_len = scan_rsp_len;
3042 }
3043
3044 return 0;
3045}
3046
e5e1e7fd
MC
3047/* This function requires the caller holds hdev->lock */
3048void hci_adv_monitors_clear(struct hci_dev *hdev)
3049{
b139553d
MC
3050 struct adv_monitor *monitor;
3051 int handle;
3052
3053 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3054 hci_free_adv_monitor(monitor);
3055
e5e1e7fd
MC
3056 idr_destroy(&hdev->adv_monitors_idr);
3057}
3058
b139553d
MC
3059void hci_free_adv_monitor(struct adv_monitor *monitor)
3060{
3061 struct adv_pattern *pattern;
3062 struct adv_pattern *tmp;
3063
3064 if (!monitor)
3065 return;
3066
3067 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list)
3068 kfree(pattern);
3069
3070 kfree(monitor);
3071}
3072
a2a4dedf
AP
3073int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3074{
3075 return mgmt_add_adv_patterns_monitor_complete(hdev, status);
3076}
3077
3078/* Assigns handle to a monitor, and if offloading is supported and power is on,
3079 * also attempts to forward the request to the controller.
3080 * Returns true if request is forwarded (result is pending), false otherwise.
3081 * This function requires the caller holds hdev->lock.
3082 */
3083bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3084 int *err)
b139553d
MC
3085{
3086 int min, max, handle;
3087
a2a4dedf
AP
3088 *err = 0;
3089
3090 if (!monitor) {
3091 *err = -EINVAL;
3092 return false;
3093 }
b139553d
MC
3094
3095 min = HCI_MIN_ADV_MONITOR_HANDLE;
3096 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3097 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3098 GFP_KERNEL);
a2a4dedf
AP
3099 if (handle < 0) {
3100 *err = handle;
3101 return false;
3102 }
b139553d 3103
b139553d 3104 monitor->handle = handle;
8208f5a9 3105
a2a4dedf
AP
3106 if (!hdev_is_powered(hdev))
3107 return false;
8208f5a9 3108
a2a4dedf
AP
3109 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3110 case HCI_ADV_MONITOR_EXT_NONE:
3111 hci_update_background_scan(hdev);
3112 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
3113 /* Message was not forwarded to controller - not an error */
3114 return false;
3115 case HCI_ADV_MONITOR_EXT_MSFT:
3116 *err = msft_add_monitor_pattern(hdev, monitor);
3117 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
3118 *err);
3119 break;
3120 }
3121
3122 return (*err == 0);
b139553d
MC
3123}
3124
bd2fbc6c
MC
3125static int free_adv_monitor(int id, void *ptr, void *data)
3126{
3127 struct hci_dev *hdev = data;
3128 struct adv_monitor *monitor = ptr;
3129
3130 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3131 hci_free_adv_monitor(monitor);
c88e3979 3132 hdev->adv_monitors_cnt--;
bd2fbc6c
MC
3133
3134 return 0;
3135}
3136
3137/* This function requires the caller holds hdev->lock */
3138int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle)
3139{
3140 struct adv_monitor *monitor;
3141
3142 if (handle) {
3143 monitor = idr_find(&hdev->adv_monitors_idr, handle);
3144 if (!monitor)
3145 return -ENOENT;
3146
3147 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3148 hci_free_adv_monitor(monitor);
c88e3979 3149 hdev->adv_monitors_cnt--;
bd2fbc6c
MC
3150 } else {
3151 /* Remove all monitors if handle is 0. */
3152 idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev);
3153 }
3154
8208f5a9
MC
3155 hci_update_background_scan(hdev);
3156
bd2fbc6c
MC
3157 return 0;
3158}
3159
8208f5a9
MC
3160/* This function requires the caller holds hdev->lock */
3161bool hci_is_adv_monitoring(struct hci_dev *hdev)
3162{
3163 return !idr_is_empty(&hdev->adv_monitors_idr);
3164}
3165
a2a4dedf
AP
3166int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3167{
3168 if (msft_monitor_supported(hdev))
3169 return HCI_ADV_MONITOR_EXT_MSFT;
3170
3171 return HCI_ADV_MONITOR_EXT_NONE;
3172}
3173
dcc36c16 3174struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 3175 bdaddr_t *bdaddr, u8 type)
b2a66aad 3176{
8035ded4 3177 struct bdaddr_list *b;
b2a66aad 3178
dcc36c16 3179 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 3180 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3181 return b;
b9ee0a78 3182 }
b2a66aad
AJ
3183
3184 return NULL;
3185}
3186
b950aa88
AN
3187struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3188 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3189 u8 type)
3190{
3191 struct bdaddr_list_with_irk *b;
3192
3193 list_for_each_entry(b, bdaddr_list, list) {
3194 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3195 return b;
3196 }
3197
3198 return NULL;
3199}
3200
8baaa403
APS
3201struct bdaddr_list_with_flags *
3202hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3203 bdaddr_t *bdaddr, u8 type)
3204{
3205 struct bdaddr_list_with_flags *b;
3206
3207 list_for_each_entry(b, bdaddr_list, list) {
3208 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3209 return b;
3210 }
3211
3212 return NULL;
3213}
3214
dcc36c16 3215void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad 3216{
7eb7404f 3217 struct bdaddr_list *b, *n;
b2a66aad 3218
7eb7404f
GT
3219 list_for_each_entry_safe(b, n, bdaddr_list, list) {
3220 list_del(&b->list);
b2a66aad
AJ
3221 kfree(b);
3222 }
b2a66aad
AJ
3223}
3224
dcc36c16 3225int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3226{
3227 struct bdaddr_list *entry;
b2a66aad 3228
b9ee0a78 3229 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3230 return -EBADF;
3231
dcc36c16 3232 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 3233 return -EEXIST;
b2a66aad 3234
27f70f3e 3235 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
3236 if (!entry)
3237 return -ENOMEM;
b2a66aad
AJ
3238
3239 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3240 entry->bdaddr_type = type;
b2a66aad 3241
dcc36c16 3242 list_add(&entry->list, list);
b2a66aad 3243
2a8357f2 3244 return 0;
b2a66aad
AJ
3245}
3246
b950aa88
AN
3247int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3248 u8 type, u8 *peer_irk, u8 *local_irk)
3249{
3250 struct bdaddr_list_with_irk *entry;
3251
3252 if (!bacmp(bdaddr, BDADDR_ANY))
3253 return -EBADF;
3254
3255 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3256 return -EEXIST;
3257
3258 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3259 if (!entry)
3260 return -ENOMEM;
3261
3262 bacpy(&entry->bdaddr, bdaddr);
3263 entry->bdaddr_type = type;
3264
3265 if (peer_irk)
3266 memcpy(entry->peer_irk, peer_irk, 16);
3267
3268 if (local_irk)
3269 memcpy(entry->local_irk, local_irk, 16);
3270
3271 list_add(&entry->list, list);
3272
3273 return 0;
3274}
3275
8baaa403
APS
3276int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3277 u8 type, u32 flags)
3278{
3279 struct bdaddr_list_with_flags *entry;
3280
3281 if (!bacmp(bdaddr, BDADDR_ANY))
3282 return -EBADF;
3283
3284 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3285 return -EEXIST;
3286
3287 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3288 if (!entry)
3289 return -ENOMEM;
3290
3291 bacpy(&entry->bdaddr, bdaddr);
3292 entry->bdaddr_type = type;
3293 entry->current_flags = flags;
3294
3295 list_add(&entry->list, list);
3296
3297 return 0;
3298}
3299
dcc36c16 3300int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3301{
3302 struct bdaddr_list *entry;
b2a66aad 3303
35f7498a 3304 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 3305 hci_bdaddr_list_clear(list);
35f7498a
JH
3306 return 0;
3307 }
b2a66aad 3308
dcc36c16 3309 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
3310 if (!entry)
3311 return -ENOENT;
3312
3313 list_del(&entry->list);
3314 kfree(entry);
3315
3316 return 0;
3317}
3318
b950aa88
AN
3319int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3320 u8 type)
3321{
3322 struct bdaddr_list_with_irk *entry;
3323
3324 if (!bacmp(bdaddr, BDADDR_ANY)) {
3325 hci_bdaddr_list_clear(list);
3326 return 0;
3327 }
3328
3329 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3330 if (!entry)
3331 return -ENOENT;
3332
3333 list_del(&entry->list);
3334 kfree(entry);
3335
3336 return 0;
3337}
3338
8baaa403
APS
3339int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3340 u8 type)
3341{
3342 struct bdaddr_list_with_flags *entry;
3343
3344 if (!bacmp(bdaddr, BDADDR_ANY)) {
3345 hci_bdaddr_list_clear(list);
3346 return 0;
3347 }
3348
3349 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3350 if (!entry)
3351 return -ENOENT;
3352
3353 list_del(&entry->list);
3354 kfree(entry);
3355
3356 return 0;
3357}
3358
15819a70
AG
3359/* This function requires the caller holds hdev->lock */
3360struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3361 bdaddr_t *addr, u8 addr_type)
3362{
3363 struct hci_conn_params *params;
3364
3365 list_for_each_entry(params, &hdev->le_conn_params, list) {
3366 if (bacmp(&params->addr, addr) == 0 &&
3367 params->addr_type == addr_type) {
3368 return params;
3369 }
3370 }
3371
3372 return NULL;
3373}
3374
4b10966f 3375/* This function requires the caller holds hdev->lock */
501f8827
JH
3376struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3377 bdaddr_t *addr, u8 addr_type)
a9b0a04c 3378{
912b42ef 3379 struct hci_conn_params *param;
a9b0a04c 3380
6540351e
MH
3381 switch (addr_type) {
3382 case ADDR_LE_DEV_PUBLIC_RESOLVED:
3383 addr_type = ADDR_LE_DEV_PUBLIC;
3384 break;
3385 case ADDR_LE_DEV_RANDOM_RESOLVED:
3386 addr_type = ADDR_LE_DEV_RANDOM;
3387 break;
3388 }
3389
501f8827 3390 list_for_each_entry(param, list, action) {
912b42ef
JH
3391 if (bacmp(&param->addr, addr) == 0 &&
3392 param->addr_type == addr_type)
3393 return param;
4b10966f
MH
3394 }
3395
3396 return NULL;
a9b0a04c
AG
3397}
3398
15819a70 3399/* This function requires the caller holds hdev->lock */
51d167c0
MH
3400struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3401 bdaddr_t *addr, u8 addr_type)
15819a70
AG
3402{
3403 struct hci_conn_params *params;
3404
3405 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 3406 if (params)
51d167c0 3407 return params;
15819a70
AG
3408
3409 params = kzalloc(sizeof(*params), GFP_KERNEL);
3410 if (!params) {
2064ee33 3411 bt_dev_err(hdev, "out of memory");
51d167c0 3412 return NULL;
15819a70
AG
3413 }
3414
3415 bacpy(&params->addr, addr);
3416 params->addr_type = addr_type;
cef952ce
AG
3417
3418 list_add(&params->list, &hdev->le_conn_params);
93450c75 3419 INIT_LIST_HEAD(&params->action);
cef952ce 3420
bf5b3c8b
MH
3421 params->conn_min_interval = hdev->le_conn_min_interval;
3422 params->conn_max_interval = hdev->le_conn_max_interval;
3423 params->conn_latency = hdev->le_conn_latency;
3424 params->supervision_timeout = hdev->le_supv_timeout;
3425 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3426
3427 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3428
51d167c0 3429 return params;
bf5b3c8b
MH
3430}
3431
f6c63249 3432static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 3433{
f8aaf9b6 3434 if (params->conn) {
f161dd41 3435 hci_conn_drop(params->conn);
f8aaf9b6
JH
3436 hci_conn_put(params->conn);
3437 }
f161dd41 3438
95305baa 3439 list_del(&params->action);
15819a70
AG
3440 list_del(&params->list);
3441 kfree(params);
f6c63249
JH
3442}
3443
3444/* This function requires the caller holds hdev->lock */
3445void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3446{
3447 struct hci_conn_params *params;
3448
3449 params = hci_conn_params_lookup(hdev, addr, addr_type);
3450 if (!params)
3451 return;
3452
3453 hci_conn_params_free(params);
15819a70 3454
95305baa
JH
3455 hci_update_background_scan(hdev);
3456
15819a70
AG
3457 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3458}
3459
3460/* This function requires the caller holds hdev->lock */
55af49a8 3461void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
3462{
3463 struct hci_conn_params *params, *tmp;
3464
3465 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
3466 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3467 continue;
f75113a2
JP
3468
3469 /* If trying to estabilish one time connection to disabled
3470 * device, leave the params, but mark them as just once.
3471 */
3472 if (params->explicit_connect) {
3473 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3474 continue;
3475 }
3476
15819a70
AG
3477 list_del(&params->list);
3478 kfree(params);
3479 }
3480
55af49a8 3481 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
3482}
3483
3484/* This function requires the caller holds hdev->lock */
030e7f81 3485static void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 3486{
15819a70 3487 struct hci_conn_params *params, *tmp;
77a77a30 3488
f6c63249
JH
3489 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3490 hci_conn_params_free(params);
77a77a30 3491
15819a70 3492 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
3493}
3494
a1f4c318
JH
3495/* Copy the Identity Address of the controller.
3496 *
3497 * If the controller has a public BD_ADDR, then by default use that one.
3498 * If this is a LE only controller without a public address, default to
3499 * the static random address.
3500 *
3501 * For debugging purposes it is possible to force controllers with a
3502 * public address to use the static random address instead.
50b5b952
MH
3503 *
3504 * In case BR/EDR has been disabled on a dual-mode controller and
3505 * userspace has configured a static address, then that address
3506 * becomes the identity address instead of the public BR/EDR address.
a1f4c318
JH
3507 */
3508void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3509 u8 *bdaddr_type)
3510{
b7cb93e5 3511 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
50b5b952 3512 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
d7a5a11d 3513 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
50b5b952 3514 bacmp(&hdev->static_addr, BDADDR_ANY))) {
a1f4c318
JH
3515 bacpy(bdaddr, &hdev->static_addr);
3516 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3517 } else {
3518 bacpy(bdaddr, &hdev->bdaddr);
3519 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3520 }
3521}
3522
0e995280
APS
3523static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3524{
3525 int i;
3526
3527 for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3528 clear_bit(i, hdev->suspend_tasks);
3529
3530 wake_up(&hdev->suspend_wait_q);
3531}
3532
9952d90e
APS
3533static int hci_suspend_wait_event(struct hci_dev *hdev)
3534{
3535#define WAKE_COND \
3536 (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
3537 __SUSPEND_NUM_TASKS)
3538
3539 int i;
3540 int ret = wait_event_timeout(hdev->suspend_wait_q,
3541 WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3542
3543 if (ret == 0) {
a9ec8423 3544 bt_dev_err(hdev, "Timed out waiting for suspend events");
9952d90e
APS
3545 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3546 if (test_bit(i, hdev->suspend_tasks))
a9ec8423 3547 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
9952d90e
APS
3548 clear_bit(i, hdev->suspend_tasks);
3549 }
3550
3551 ret = -ETIMEDOUT;
3552 } else {
3553 ret = 0;
3554 }
3555
3556 return ret;
3557}
3558
3559static void hci_prepare_suspend(struct work_struct *work)
3560{
3561 struct hci_dev *hdev =
3562 container_of(work, struct hci_dev, suspend_prepare);
3563
3564 hci_dev_lock(hdev);
3565 hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3566 hci_dev_unlock(hdev);
3567}
3568
8731840a
APS
3569static int hci_change_suspend_state(struct hci_dev *hdev,
3570 enum suspended_state next)
3571{
3572 hdev->suspend_state_next = next;
3573 set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3574 queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3575 return hci_suspend_wait_event(hdev);
3576}
3577
2f20216c
APS
3578static void hci_clear_wake_reason(struct hci_dev *hdev)
3579{
3580 hci_dev_lock(hdev);
3581
3582 hdev->wake_reason = 0;
3583 bacpy(&hdev->wake_addr, BDADDR_ANY);
3584 hdev->wake_addr_type = 0;
3585
3586 hci_dev_unlock(hdev);
3587}
3588
9952d90e
APS
3589static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3590 void *data)
3591{
3592 struct hci_dev *hdev =
3593 container_of(nb, struct hci_dev, suspend_notifier);
3594 int ret = 0;
2f20216c 3595 u8 state = BT_RUNNING;
9952d90e
APS
3596
3597 /* If powering down, wait for completion. */
3598 if (mgmt_powering_down(hdev)) {
3599 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3600 ret = hci_suspend_wait_event(hdev);
3601 if (ret)
3602 goto done;
3603 }
3604
3605 /* Suspend notifier should only act on events when powered. */
3606 if (!hdev_is_powered(hdev))
3607 goto done;
3608
3609 if (action == PM_SUSPEND_PREPARE) {
4f40afc6
APS
3610 /* Suspend consists of two actions:
3611 * - First, disconnect everything and make the controller not
3612 * connectable (disabling scanning)
3613 * - Second, program event filter/whitelist and enable scan
3614 */
8731840a 3615 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
2f20216c
APS
3616 if (!ret)
3617 state = BT_SUSPEND_DISCONNECT;
4f40afc6 3618
81dafad5
APS
3619 /* Only configure whitelist if disconnect succeeded and wake
3620 * isn't being prevented.
3621 */
2f20216c 3622 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
8731840a 3623 ret = hci_change_suspend_state(hdev,
0d2c9825 3624 BT_SUSPEND_CONFIGURE_WAKE);
2f20216c
APS
3625 if (!ret)
3626 state = BT_SUSPEND_CONFIGURE_WAKE;
3627 }
3628
3629 hci_clear_wake_reason(hdev);
3630 mgmt_suspending(hdev, state);
3631
9952d90e 3632 } else if (action == PM_POST_SUSPEND) {
8731840a 3633 ret = hci_change_suspend_state(hdev, BT_RUNNING);
2f20216c
APS
3634
3635 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3636 hdev->wake_addr_type);
9952d90e
APS
3637 }
3638
3639done:
a9ec8423
APS
3640 /* We always allow suspend even if suspend preparation failed and
3641 * attempt to recover in resume.
3642 */
3643 if (ret)
3644 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3645 action, ret);
3646
24b06572 3647 return NOTIFY_DONE;
9952d90e 3648}
8731840a 3649
9be0dab7
DH
3650/* Alloc HCI device */
3651struct hci_dev *hci_alloc_dev(void)
3652{
3653 struct hci_dev *hdev;
3654
27f70f3e 3655 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
3656 if (!hdev)
3657 return NULL;
3658
b1b813d4
DH
3659 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3660 hdev->esco_type = (ESCO_HV1);
3661 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3662 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3663 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3664 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3665 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3666 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
d2609b34
FG
3667 hdev->adv_instance_cnt = 0;
3668 hdev->cur_adv_instance = 0x00;
5d900e46 3669 hdev->adv_instance_timeout = 0;
b1b813d4 3670
c4f1f408
HC
3671 hdev->advmon_allowlist_duration = 300;
3672 hdev->advmon_no_filter_duration = 500;
80af16a3 3673 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
c4f1f408 3674
b1b813d4
DH
3675 hdev->sniff_max_interval = 800;
3676 hdev->sniff_min_interval = 80;
3677
3f959d46 3678 hdev->le_adv_channel_map = 0x07;
628531c9
GL
3679 hdev->le_adv_min_interval = 0x0800;
3680 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
3681 hdev->le_scan_interval = 0x0060;
3682 hdev->le_scan_window = 0x0030;
10873f99
AM
3683 hdev->le_scan_int_suspend = 0x0400;
3684 hdev->le_scan_window_suspend = 0x0012;
3685 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3686 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3687 hdev->le_scan_int_connect = 0x0060;
3688 hdev->le_scan_window_connect = 0x0060;
b48c3b59
JH
3689 hdev->le_conn_min_interval = 0x0018;
3690 hdev->le_conn_max_interval = 0x0028;
04fb7d90
MH
3691 hdev->le_conn_latency = 0x0000;
3692 hdev->le_supv_timeout = 0x002a;
a8e1bfaa
MH
3693 hdev->le_def_tx_len = 0x001b;
3694 hdev->le_def_tx_time = 0x0148;
3695 hdev->le_max_tx_len = 0x001b;
3696 hdev->le_max_tx_time = 0x0148;
3697 hdev->le_max_rx_len = 0x001b;
3698 hdev->le_max_rx_time = 0x0148;
30d65e08
MK
3699 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3700 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
6decb5b4
JK
3701 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3702 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
1d0fac2c 3703 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
10873f99 3704 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
49b020c1 3705 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
7c395ea5
DW
3706 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3707 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
bef64738 3708
d6bfd59c 3709 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3710 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3711 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3712 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
302975cb 3713 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
58a96fc3 3714 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
d6bfd59c 3715
10873f99
AM
3716 /* default 1.28 sec page scan */
3717 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3718 hdev->def_page_scan_int = 0x0800;
3719 hdev->def_page_scan_window = 0x0012;
3720
b1b813d4
DH
3721 mutex_init(&hdev->lock);
3722 mutex_init(&hdev->req_lock);
3723
3724 INIT_LIST_HEAD(&hdev->mgmt_pending);
3725 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 3726 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
3727 INIT_LIST_HEAD(&hdev->uuids);
3728 INIT_LIST_HEAD(&hdev->link_keys);
3729 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3730 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3731 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3732 INIT_LIST_HEAD(&hdev->le_white_list);
cfdb0c2d 3733 INIT_LIST_HEAD(&hdev->le_resolv_list);
15819a70 3734 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3735 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3736 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3737 INIT_LIST_HEAD(&hdev->conn_hash.list);
d2609b34 3738 INIT_LIST_HEAD(&hdev->adv_instances);
600a8749 3739 INIT_LIST_HEAD(&hdev->blocked_keys);
b1b813d4
DH
3740
3741 INIT_WORK(&hdev->rx_work, hci_rx_work);
3742 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3743 INIT_WORK(&hdev->tx_work, hci_tx_work);
3744 INIT_WORK(&hdev->power_on, hci_power_on);
c7741d16 3745 INIT_WORK(&hdev->error_reset, hci_error_reset);
9952d90e 3746 INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
b1b813d4 3747
b1b813d4 3748 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
b1b813d4 3749
b1b813d4
DH
3750 skb_queue_head_init(&hdev->rx_q);
3751 skb_queue_head_init(&hdev->cmd_q);
3752 skb_queue_head_init(&hdev->raw_q);
3753
3754 init_waitqueue_head(&hdev->req_wait_q);
9952d90e 3755 init_waitqueue_head(&hdev->suspend_wait_q);
b1b813d4 3756
65cc2b49 3757 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3758
5fc16cc4
JH
3759 hci_request_setup(hdev);
3760
b1b813d4
DH
3761 hci_init_sysfs(hdev);
3762 discovery_init(hdev);
9be0dab7
DH
3763
3764 return hdev;
3765}
3766EXPORT_SYMBOL(hci_alloc_dev);
3767
3768/* Free HCI device */
3769void hci_free_dev(struct hci_dev *hdev)
3770{
9be0dab7
DH
3771 /* will free via device release */
3772 put_device(&hdev->dev);
3773}
3774EXPORT_SYMBOL(hci_free_dev);
3775
1da177e4
LT
3776/* Register HCI device */
3777int hci_register_dev(struct hci_dev *hdev)
3778{
b1b813d4 3779 int id, error;
1da177e4 3780
74292d5a 3781 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
3782 return -EINVAL;
3783
08add513
MM
3784 /* Do not allow HCI_AMP devices to register at index 0,
3785 * so the index can be used as the AMP controller ID.
3786 */
3df92b31 3787 switch (hdev->dev_type) {
ca8bee5d 3788 case HCI_PRIMARY:
3df92b31
SL
3789 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3790 break;
3791 case HCI_AMP:
3792 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3793 break;
3794 default:
3795 return -EINVAL;
1da177e4 3796 }
8e87d142 3797
3df92b31
SL
3798 if (id < 0)
3799 return id;
3800
1da177e4
LT
3801 sprintf(hdev->name, "hci%d", id);
3802 hdev->id = id;
2d8b3a11
AE
3803
3804 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3805
29e2dd0d 3806 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
33ca954d
DH
3807 if (!hdev->workqueue) {
3808 error = -ENOMEM;
3809 goto err;
3810 }
f48fd9c8 3811
29e2dd0d
TH
3812 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3813 hdev->name);
6ead1bbc
JH
3814 if (!hdev->req_workqueue) {
3815 destroy_workqueue(hdev->workqueue);
3816 error = -ENOMEM;
3817 goto err;
3818 }
3819
0153e2ec
MH
3820 if (!IS_ERR_OR_NULL(bt_debugfs))
3821 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3822
bdc3e0f1
MH
3823 dev_set_name(&hdev->dev, "%s", hdev->name);
3824
3825 error = device_add(&hdev->dev);
33ca954d 3826 if (error < 0)
54506918 3827 goto err_wqueue;
1da177e4 3828
6d5d2ee6
HK
3829 hci_leds_init(hdev);
3830
611b30f7 3831 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3832 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3833 hdev);
611b30f7
MH
3834 if (hdev->rfkill) {
3835 if (rfkill_register(hdev->rfkill) < 0) {
3836 rfkill_destroy(hdev->rfkill);
3837 hdev->rfkill = NULL;
3838 }
3839 }
3840
5e130367 3841 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
a1536da2 3842 hci_dev_set_flag(hdev, HCI_RFKILLED);
5e130367 3843
a1536da2
MH
3844 hci_dev_set_flag(hdev, HCI_SETUP);
3845 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
ce2be9ac 3846
ca8bee5d 3847 if (hdev->dev_type == HCI_PRIMARY) {
56f87901
JH
3848 /* Assume BR/EDR support until proven otherwise (such as
3849 * through reading supported features during init.
3850 */
a1536da2 3851 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
56f87901 3852 }
ce2be9ac 3853
fcee3377
GP
3854 write_lock(&hci_dev_list_lock);
3855 list_add(&hdev->list, &hci_dev_list);
3856 write_unlock(&hci_dev_list_lock);
3857
4a964404
MH
3858 /* Devices that are marked for raw-only usage are unconfigured
3859 * and should not be included in normal operation.
fee746b0
MH
3860 */
3861 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
a1536da2 3862 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
fee746b0 3863
05fcd4c4 3864 hci_sock_dev_event(hdev, HCI_DEV_REG);
dc946bd8 3865 hci_dev_hold(hdev);
1da177e4 3866
9952d90e
APS
3867 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3868 error = register_pm_notifier(&hdev->suspend_notifier);
3869 if (error)
3870 goto err_wqueue;
3871
19202573 3872 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3873
e5e1e7fd
MC
3874 idr_init(&hdev->adv_monitors_idr);
3875
1da177e4 3876 return id;
f48fd9c8 3877
33ca954d
DH
3878err_wqueue:
3879 destroy_workqueue(hdev->workqueue);
6ead1bbc 3880 destroy_workqueue(hdev->req_workqueue);
33ca954d 3881err:
3df92b31 3882 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3883
33ca954d 3884 return error;
1da177e4
LT
3885}
3886EXPORT_SYMBOL(hci_register_dev);
3887
3888/* Unregister HCI device */
59735631 3889void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3890{
2d7cc19e 3891 int id;
ef222013 3892
c13854ce 3893 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3894
a1536da2 3895 hci_dev_set_flag(hdev, HCI_UNREGISTER);
94324962 3896
3df92b31
SL
3897 id = hdev->id;
3898
f20d09d5 3899 write_lock(&hci_dev_list_lock);
1da177e4 3900 list_del(&hdev->list);
f20d09d5 3901 write_unlock(&hci_dev_list_lock);
1da177e4 3902
b9b5ef18
GP
3903 cancel_work_sync(&hdev->power_on);
3904
0e995280 3905 hci_suspend_clear_tasks(hdev);
9952d90e 3906 unregister_pm_notifier(&hdev->suspend_notifier);
4e8c36c3
APS
3907 cancel_work_sync(&hdev->suspend_prepare);
3908
3909 hci_dev_do_close(hdev);
9952d90e 3910
ab81cbf9 3911 if (!test_bit(HCI_INIT, &hdev->flags) &&
d7a5a11d
MH
3912 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3913 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
09fd0de5 3914 hci_dev_lock(hdev);
744cf19e 3915 mgmt_index_removed(hdev);
09fd0de5 3916 hci_dev_unlock(hdev);
56e5cb86 3917 }
ab81cbf9 3918
2e58ef3e
JH
3919 /* mgmt_index_removed should take care of emptying the
3920 * pending list */
3921 BUG_ON(!list_empty(&hdev->mgmt_pending));
3922
05fcd4c4 3923 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
1da177e4 3924
611b30f7
MH
3925 if (hdev->rfkill) {
3926 rfkill_unregister(hdev->rfkill);
3927 rfkill_destroy(hdev->rfkill);
3928 }
3929
bdc3e0f1 3930 device_del(&hdev->dev);
147e2d59 3931
0153e2ec 3932 debugfs_remove_recursive(hdev->debugfs);
5177a838
MH
3933 kfree_const(hdev->hw_info);
3934 kfree_const(hdev->fw_info);
0153e2ec 3935
f48fd9c8 3936 destroy_workqueue(hdev->workqueue);
6ead1bbc 3937 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3938
09fd0de5 3939 hci_dev_lock(hdev);
dcc36c16 3940 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 3941 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 3942 hci_uuids_clear(hdev);
55ed8ca1 3943 hci_link_keys_clear(hdev);
b899efaf 3944 hci_smp_ltks_clear(hdev);
970c4e46 3945 hci_smp_irks_clear(hdev);
2763eda6 3946 hci_remote_oob_data_clear(hdev);
d2609b34 3947 hci_adv_instances_clear(hdev);
e5e1e7fd 3948 hci_adv_monitors_clear(hdev);
dcc36c16 3949 hci_bdaddr_list_clear(&hdev->le_white_list);
cfdb0c2d 3950 hci_bdaddr_list_clear(&hdev->le_resolv_list);
373110c5 3951 hci_conn_params_clear_all(hdev);
22078800 3952 hci_discovery_filter_clear(hdev);
600a8749 3953 hci_blocked_keys_clear(hdev);
09fd0de5 3954 hci_dev_unlock(hdev);
e2e0cacb 3955
dc946bd8 3956 hci_dev_put(hdev);
3df92b31
SL
3957
3958 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3959}
3960EXPORT_SYMBOL(hci_unregister_dev);
3961
3962/* Suspend HCI device */
3963int hci_suspend_dev(struct hci_dev *hdev)
3964{
05fcd4c4 3965 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
1da177e4
LT
3966 return 0;
3967}
3968EXPORT_SYMBOL(hci_suspend_dev);
3969
3970/* Resume HCI device */
3971int hci_resume_dev(struct hci_dev *hdev)
3972{
05fcd4c4 3973 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
1da177e4
LT
3974 return 0;
3975}
3976EXPORT_SYMBOL(hci_resume_dev);
3977
75e0569f
MH
3978/* Reset HCI device */
3979int hci_reset_dev(struct hci_dev *hdev)
3980{
1e4b6e91 3981 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
75e0569f
MH
3982 struct sk_buff *skb;
3983
3984 skb = bt_skb_alloc(3, GFP_ATOMIC);
3985 if (!skb)
3986 return -ENOMEM;
3987
d79f34e3 3988 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
59ae1d12 3989 skb_put_data(skb, hw_err, 3);
75e0569f
MH
3990
3991 /* Send Hardware Error to upper stack */
3992 return hci_recv_frame(hdev, skb);
3993}
3994EXPORT_SYMBOL(hci_reset_dev);
3995
/* Receive frame from HCI drivers.
 *
 * Validates the frame, marks it as incoming, timestamps it, and defers
 * the actual processing to the rx_work item on hdev->workqueue.
 *
 * Returns 0 on success, -ENXIO if the device is neither up nor
 * initializing, -EINVAL for an unsupported packet type.  The skb is
 * always consumed, even on error.
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Drivers may only hand in event, ACL, SCO or ISO packets */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue for hci_rx_work() */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
4025
/* Receive diagnostic message from HCI drivers.
 *
 * Like hci_recv_frame() but for vendor diagnostic traffic: the packet
 * type is forced to HCI_DIAG_PKT and no device-state or type checks are
 * applied.  Always returns 0 and consumes the skb.
 */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue for hci_rx_work() */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
4041
/* Record a printf-style hardware information string for the device.
 *
 * Frees any previous string first; hci_unregister_dev() releases the
 * final one.  NOTE(review): kvasprintf_const() presumably leaves
 * hdev->hw_info NULL on allocation failure — callers must tolerate that.
 */
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);
4052
/* Record a printf-style firmware information string for the device.
 *
 * Mirrors hci_set_hw_info(): the old string is freed before the new one
 * is formatted and stored.
 */
void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
4063
1da177e4
LT
4064/* ---- Interface to upper protocols ---- */
4065
1da177e4
LT
/* Register an upper-protocol callback structure.
 *
 * Adds @cb to the tail of the global hci_cb_list under hci_cb_list_lock.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
4077
/* Unregister an upper-protocol callback structure.
 *
 * Removes @cb from the global hci_cb_list under hci_cb_list_lock.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4089
/* Hand one outgoing frame to the driver.
 *
 * Timestamps the skb, mirrors it to the monitor channel (and to raw
 * sockets when promiscuous listeners exist), then passes it to
 * hdev->send().  The skb is consumed on every path: freed here if the
 * device is not running or the driver rejects it, otherwise owned by
 * the driver.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Drop silently once the transport is no longer running */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
	}
}
/* Send HCI command.
 *
 * Builds a command skb from @opcode/@param, marks it as the start of a
 * stand-alone request and queues it on cmd_q for hci_cmd_work() to
 * transmit (asynchronously, subject to cmd_cnt flow control).
 *
 * Returns 0 on success or -ENOMEM if the skb cannot be built.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 4147
/* Send a vendor HCI command without waiting for any completion event.
 *
 * Bypasses the cmd_q/flow-control machinery and hands the frame to the
 * driver directly.  Restricted to OGF 0x3f (vendor) opcodes; all
 * standard commands must go through hci_send_cmd()/hci_cmd_sync().
 *
 * Returns 0 on success, -EINVAL for non-vendor opcodes, -ENOMEM on
 * allocation failure.
 */
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
4178
/* Get data from the previously sent command.
 *
 * Returns a pointer to the parameter payload of hdev->sent_cmd if that
 * command's opcode matches @opcode, otherwise NULL.  The pointer aliases
 * the sent_cmd skb and is only valid while sent_cmd is held.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Opcode is stored little-endian on the wire */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
4196
/* Send HCI command and wait for command complete event.
 *
 * Serialized wrapper around __hci_cmd_sync(): takes the request lock so
 * only one synchronous request runs at a time.  Returns the response
 * skb (caller frees) or an ERR_PTR; -ENETDOWN if the device is not up.
 */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
4215
1da177e4
LT
/* Send ACL data */

/* Prepend an ACL header to @skb.
 *
 * Packs @handle and the packet-boundary/broadcast @flags into the
 * little-endian handle field and records the payload length (the skb
 * length before the push) as dlen.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
4228
/* Add ACL headers and queue an (optionally fragmented) ACL packet.
 *
 * The head fragment keeps the caller's @flags; any frag_list fragments
 * are re-flagged as continuation (ACL_CONT) fragments.  On AMP
 * controllers the channel handle is used in the header instead of the
 * connection handle.  All fragments are queued on @queue atomically.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; the rest travels in
	 * frag_list below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Every fragment after the head is a continuation */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
4290
/* Send ACL data over a channel.
 *
 * Queues the packet on the channel's data_q via hci_queue_acl() and
 * kicks the TX work item; actual transmission is scheduled there.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
4301
/* Send SCO data.
 *
 * Prepends a SCO header (handle + one-byte payload length) and queues
 * the packet on the connection's data_q for the TX work item.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
4322
4323/* ---- HCI TX task (outgoing data) ---- */
4324
/* HCI Connection scheduler */

/* Pick the connection of @type with queued data and the fewest
 * outstanding packets, and compute its fair-share quota.
 *
 * *quote is set to the available controller buffer count divided by the
 * number of eligible connections (minimum 1), or 0 when nothing is
 * ready.  Returns the chosen connection or NULL.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins (round-robin fairness) */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL credits.
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			bt_dev_err(hdev, "unknown link type %d", conn->type);
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4385
/* Handle a link TX timeout: disconnect every connection of @type that
 * still has unacknowledged packets outstanding.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
4406
6039aa73
GP
4407static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4408 int *quote)
1da177e4 4409{
73d80deb
LAD
4410 struct hci_conn_hash *h = &hdev->conn_hash;
4411 struct hci_chan *chan = NULL;
abc5de8f 4412 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4413 struct hci_conn *conn;
73d80deb
LAD
4414 int cnt, q, conn_num = 0;
4415
4416 BT_DBG("%s", hdev->name);
4417
bf4c6325
GP
4418 rcu_read_lock();
4419
4420 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4421 struct hci_chan *tmp;
4422
4423 if (conn->type != type)
4424 continue;
4425
4426 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4427 continue;
4428
4429 conn_num++;
4430
8192edef 4431 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4432 struct sk_buff *skb;
4433
4434 if (skb_queue_empty(&tmp->data_q))
4435 continue;
4436
4437 skb = skb_peek(&tmp->data_q);
4438 if (skb->priority < cur_prio)
4439 continue;
4440
4441 if (skb->priority > cur_prio) {
4442 num = 0;
4443 min = ~0;
4444 cur_prio = skb->priority;
4445 }
4446
4447 num++;
4448
4449 if (conn->sent < min) {
4450 min = conn->sent;
4451 chan = tmp;
4452 }
4453 }
4454
4455 if (hci_conn_num(hdev, type) == conn_num)
4456 break;
4457 }
4458
bf4c6325
GP
4459 rcu_read_unlock();
4460
73d80deb
LAD
4461 if (!chan)
4462 return NULL;
4463
4464 switch (chan->conn->type) {
4465 case ACL_LINK:
4466 cnt = hdev->acl_cnt;
4467 break;
bd1eb66b
AE
4468 case AMP_LINK:
4469 cnt = hdev->block_cnt;
4470 break;
73d80deb
LAD
4471 case SCO_LINK:
4472 case ESCO_LINK:
4473 cnt = hdev->sco_cnt;
4474 break;
4475 case LE_LINK:
4476 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4477 break;
4478 default:
4479 cnt = 0;
2064ee33 4480 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
73d80deb
LAD
4481 }
4482
4483 q = cnt / num;
4484 *quote = q ? q : 1;
4485 BT_DBG("chan %p quote %d", chan, *quote);
4486 return chan;
4487}
4488
/* Promote starved channels after a scheduling round.
 *
 * For every connection of @type: channels that transmitted this round
 * get their sent counter reset; channels that did not transmit but have
 * data queued get their head skb's priority bumped to HCI_PRIO_MAX - 1
 * so they win the next hci_chan_sent() selection.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: just reset */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
4538
b71d385a
AE
4539static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4540{
4541 /* Calculate count of blocks used by this packet */
4542 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4543}
4544
/* Detect a stalled ACL link: if no buffer credits are left (@cnt == 0)
 * and nothing has been transmitted for HCI_ACL_TX_TIMEOUT, kill the
 * stalled ACL connections.  Skipped for unconfigured controllers.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 4555
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	/* Drain fair quotas per connection while SCO credits remain.
	 * NOTE(review): hdev->sco_cnt is not decremented here — presumably
	 * credits are returned via number-of-completed-packets events;
	 * confirm against the event handler.
	 */
	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4579
/* Schedule eSCO: identical strategy to hci_sched_sco() but iterating
 * ESCO_LINK connections (eSCO shares the sco_cnt credit pool).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4603
/* Schedule ACL transmission in packet-based flow control mode.
 *
 * Repeatedly picks the best channel via hci_chan_sent() and sends its
 * packets up to the channel's quota, stopping early if a lower-priority
 * skb appears at the head.  SCO/eSCO are re-scheduled after every ACL
 * frame to keep isochronous latency low.  Starved channels are promoted
 * afterwards via hci_prio_recalculate().
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Only recalculate priorities if something was actually sent */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4645
/* Schedule ACL transmission in block-based flow control mode (AMP).
 *
 * Like hci_sched_acl_pkt() but accounting in controller buffer blocks:
 * each packet consumes __get_blocks() blocks and both the device block
 * budget and the channel quota are charged accordingly.  On AMP
 * controllers AMP_LINK channels are scheduled instead of ACL_LINK.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Packet larger than the remaining block budget:
			 * give up entirely for this round.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4699
/* Schedule ACL traffic, dispatching to the packet-based or block-based
 * scheduler according to the controller's flow control mode.
 */
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
4722
/* Schedule LE traffic.
 *
 * Uses the dedicated LE credit pool when the controller advertises one
 * (le_pkts), otherwise shares the ACL pool.  Same channel-quota and
 * priority rules as hci_sched_acl_pkt().
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	/* NOTE(review): __check_timeout() consults acl_last_tx and the
	 * ACL timeout even on this LE path — verify this is intended for
	 * controllers with a separate LE buffer pool.
	 */
	__check_timeout(hdev, cnt);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Write the remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4772
/* TX work item: run every link-type scheduler, then flush raw packets.
 *
 * Scheduling is skipped while userspace holds the device exclusively
 * (HCI_USER_CHANNEL); raw-queue packets are always sent.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4793
25985edc 4794/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4795
4796/* ACL data packet */
6039aa73 4797static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4798{
4799 struct hci_acl_hdr *hdr = (void *) skb->data;
4800 struct hci_conn *conn;
4801 __u16 handle, flags;
4802
4803 skb_pull(skb, HCI_ACL_HDR_SIZE);
4804
4805 handle = __le16_to_cpu(hdr->handle);
4806 flags = hci_flags(handle);
4807 handle = hci_handle(handle);
4808
f0e09510 4809 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4810 handle, flags);
1da177e4
LT
4811
4812 hdev->stat.acl_rx++;
4813
4814 hci_dev_lock(hdev);
4815 conn = hci_conn_hash_lookup_handle(hdev, handle);
4816 hci_dev_unlock(hdev);
8e87d142 4817
1da177e4 4818 if (conn) {
65983fc7 4819 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4820
1da177e4 4821 /* Send to upper protocol */
686ebf28
UF
4822 l2cap_recv_acldata(conn, skb, flags);
4823 return;
1da177e4 4824 } else {
2064ee33
MH
4825 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4826 handle);
1da177e4
LT
4827 }
4828
4829 kfree_skb(skb);
4830}
4831
/* SCO data packet.
 *
 * Strips the SCO header, resolves the connection from the handle,
 * records the packet status flags in the skb cb and hands the payload
 * to the SCO layer.  Packets for unknown handles are logged and
 * dropped.  The skb is consumed on every path.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol; low two flag bits carry the
		 * erroneous-data packet status.
		 */
		bt_cb(skb)->sco.pkt_status = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}
4866
/* Return true when the current request has no further queued commands:
 * either cmd_q is empty or its head starts a new request (HCI_REQ_START).
 */
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}
4877
/* Re-queue the last sent command for retransmission.
 *
 * Used when a controller loses a command (e.g. after a spontaneous
 * reset).  HCI_OP_RESET itself is never resent.  The clone is placed at
 * the head of cmd_q so it goes out before anything else.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
4899
/* Resolve the completion callback(s) for a finished command.
 *
 * Called from event processing when a Command Complete/Status for
 * @opcode with @status arrives.  On request completion, exactly one of
 * *req_complete / *req_complete_skb is set for the caller to invoke;
 * on failure mid-request, the remaining queued commands of the request
 * are discarded.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at the start of the next request and put it back */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4964
/* RX work item: drain rx_q and dispatch each packet.
 *
 * Every packet is mirrored to the monitor (and to raw sockets when
 * promiscuous listeners exist) before filtering, so traces stay
 * complete even for packets the stack then drops.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * the userspace has exclusive access to device.
		 * When device is HCI_INIT, we still need to process
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
5027
/* Command work item: transmit the next queued HCI command.
 *
 * Sends at most one command per invocation, gated by cmd_cnt (the
 * controller's command credit).  A clone of the command is kept in
 * hdev->sent_cmd for hci_sent_cmd_data()/hci_resend_last(); the command
 * timeout timer is (re)armed unless a reset is in flight.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}