Bluetooth: Fix default values for advertising interval
[linux-2.6-block.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
8c520a59 29#include <linux/rfkill.h>
baf27f6e 30#include <linux/debugfs.h>
99780a7b 31#include <linux/crypto.h>
7a0e5b15 32#include <linux/property.h>
9952d90e
APS
33#include <linux/suspend.h>
34#include <linux/wait.h>
47219839 35#include <asm/unaligned.h>
1da177e4
LT
36
37#include <net/bluetooth/bluetooth.h>
38#include <net/bluetooth/hci_core.h>
4bc58f51 39#include <net/bluetooth/l2cap.h>
af58925c 40#include <net/bluetooth/mgmt.h>
1da177e4 41
0857dd3b 42#include "hci_request.h"
60c5f5fb 43#include "hci_debugfs.h"
970c4e46 44#include "smp.h"
6d5d2ee6 45#include "leds.h"
145373cb 46#include "msft.h"
f67743f9 47#include "aosp.h"
970c4e46 48
b78752cc 49static void hci_rx_work(struct work_struct *work);
c347b765 50static void hci_cmd_work(struct work_struct *work);
3eff45ea 51static void hci_tx_work(struct work_struct *work);
1da177e4 52
1da177e4
LT
53/* HCI device list */
54LIST_HEAD(hci_dev_list);
55DEFINE_RWLOCK(hci_dev_list_lock);
56
57/* HCI callback list */
58LIST_HEAD(hci_cb_list);
fba7ecf0 59DEFINE_MUTEX(hci_cb_list_lock);
1da177e4 60
3df92b31
SL
61/* HCI ID Numbering */
62static DEFINE_IDA(hci_index_ida);
63
baf27f6e
MH
64/* ---- HCI debugfs entries ---- */
65
4b4148e9
MH
66static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
68{
69 struct hci_dev *hdev = file->private_data;
70 char buf[3];
71
74b93e9f 72 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
4b4148e9
MH
73 buf[1] = '\n';
74 buf[2] = '\0';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
76}
77
78static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
80{
81 struct hci_dev *hdev = file->private_data;
82 struct sk_buff *skb;
4b4148e9 83 bool enable;
3bf5e97d 84 int err;
4b4148e9
MH
85
86 if (!test_bit(HCI_UP, &hdev->flags))
87 return -ENETDOWN;
88
3bf5e97d
AS
89 err = kstrtobool_from_user(user_buf, count, &enable);
90 if (err)
91 return err;
4b4148e9 92
b7cb93e5 93 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
4b4148e9
MH
94 return -EALREADY;
95
b504430c 96 hci_req_sync_lock(hdev);
4b4148e9
MH
97 if (enable)
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
99 HCI_CMD_TIMEOUT);
100 else
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102 HCI_CMD_TIMEOUT);
b504430c 103 hci_req_sync_unlock(hdev);
4b4148e9
MH
104
105 if (IS_ERR(skb))
106 return PTR_ERR(skb);
107
4b4148e9
MH
108 kfree_skb(skb);
109
b7cb93e5 110 hci_dev_change_flag(hdev, HCI_DUT_MODE);
4b4148e9
MH
111
112 return count;
113}
114
115static const struct file_operations dut_mode_fops = {
116 .open = simple_open,
117 .read = dut_mode_read,
118 .write = dut_mode_write,
119 .llseek = default_llseek,
120};
121
4b4113d6
MH
122static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos)
124{
125 struct hci_dev *hdev = file->private_data;
126 char buf[3];
127
74b93e9f 128 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
4b4113d6
MH
129 buf[1] = '\n';
130 buf[2] = '\0';
131 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
132}
133
134static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
135 size_t count, loff_t *ppos)
136{
137 struct hci_dev *hdev = file->private_data;
4b4113d6
MH
138 bool enable;
139 int err;
140
3bf5e97d
AS
141 err = kstrtobool_from_user(user_buf, count, &enable);
142 if (err)
143 return err;
4b4113d6 144
7e995b9e 145 /* When the diagnostic flags are not persistent and the transport
b56c7b25
MH
146 * is not active or in user channel operation, then there is no need
147 * for the vendor callback. Instead just store the desired value and
148 * the setting will be programmed when the controller gets powered on.
7e995b9e
MH
149 */
150 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
b56c7b25
MH
151 (!test_bit(HCI_RUNNING, &hdev->flags) ||
152 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
7e995b9e
MH
153 goto done;
154
b504430c 155 hci_req_sync_lock(hdev);
4b4113d6 156 err = hdev->set_diag(hdev, enable);
b504430c 157 hci_req_sync_unlock(hdev);
4b4113d6
MH
158
159 if (err < 0)
160 return err;
161
7e995b9e 162done:
4b4113d6
MH
163 if (enable)
164 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
165 else
166 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
167
168 return count;
169}
170
171static const struct file_operations vendor_diag_fops = {
172 .open = simple_open,
173 .read = vendor_diag_read,
174 .write = vendor_diag_write,
175 .llseek = default_llseek,
176};
177
f640ee98
MH
178static void hci_debugfs_create_basic(struct hci_dev *hdev)
179{
180 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
181 &dut_mode_fops);
182
183 if (hdev->set_diag)
184 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
185 &vendor_diag_fops);
186}
187
a1d01db1 188static int hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 189{
42c6b129 190 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
191
192 /* Reset device */
42c6b129
JH
193 set_bit(HCI_RESET, &req->hdev->flags);
194 hci_req_add(req, HCI_OP_RESET, 0, NULL);
a1d01db1 195 return 0;
1da177e4
LT
196}
197
42c6b129 198static void bredr_init(struct hci_request *req)
1da177e4 199{
42c6b129 200 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 201
1da177e4 202 /* Read Local Supported Features */
42c6b129 203 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 204
1143e5a6 205 /* Read Local Version */
42c6b129 206 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
207
208 /* Read BD Address */
42c6b129 209 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
210}
211
0af801b9 212static void amp_init1(struct hci_request *req)
e61ef499 213{
42c6b129 214 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 215
e61ef499 216 /* Read Local Version */
42c6b129 217 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489 218
f6996cfe
MH
219 /* Read Local Supported Commands */
220 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
221
6bcbc489 222 /* Read Local AMP Info */
42c6b129 223 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
224
225 /* Read Data Blk size */
42c6b129 226 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
7528ca1c 227
f38ba941
MH
228 /* Read Flow Control Mode */
229 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
230
7528ca1c
MH
231 /* Read Location Data */
232 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
e61ef499
AE
233}
234
a1d01db1 235static int amp_init2(struct hci_request *req)
0af801b9
JH
236{
237 /* Read Local Supported Features. Not all AMP controllers
238 * support this so it's placed conditionally in the second
239 * stage init.
240 */
241 if (req->hdev->commands[14] & 0x20)
242 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
a1d01db1
JH
243
244 return 0;
0af801b9
JH
245}
246
a1d01db1 247static int hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 248{
42c6b129 249 struct hci_dev *hdev = req->hdev;
e61ef499
AE
250
251 BT_DBG("%s %ld", hdev->name, opt);
252
11778716
AE
253 /* Reset */
254 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 255 hci_reset_req(req, 0);
11778716 256
e61ef499 257 switch (hdev->dev_type) {
ca8bee5d 258 case HCI_PRIMARY:
42c6b129 259 bredr_init(req);
e61ef499 260 break;
e61ef499 261 case HCI_AMP:
0af801b9 262 amp_init1(req);
e61ef499 263 break;
e61ef499 264 default:
2064ee33 265 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
e61ef499
AE
266 break;
267 }
a1d01db1
JH
268
269 return 0;
e61ef499
AE
270}
271
42c6b129 272static void bredr_setup(struct hci_request *req)
2177bab5 273{
2177bab5
JH
274 __le16 param;
275 __u8 flt_type;
276
277 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 278 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
279
280 /* Read Class of Device */
42c6b129 281 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
282
283 /* Read Local Name */
42c6b129 284 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
285
286 /* Read Voice Setting */
42c6b129 287 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5 288
b4cb9fb2
MH
289 /* Read Number of Supported IAC */
290 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
291
4b836f39
MH
292 /* Read Current IAC LAP */
293 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
294
2177bab5
JH
295 /* Clear Event Filters */
296 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 297 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
298
299 /* Connection accept timeout ~20 secs */
dcf4adbf 300 param = cpu_to_le16(0x7d00);
42c6b129 301 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5
JH
302}
303
42c6b129 304static void le_setup(struct hci_request *req)
2177bab5 305{
c73eee91
JH
306 struct hci_dev *hdev = req->hdev;
307
2177bab5 308 /* Read LE Buffer Size */
42c6b129 309 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
310
311 /* Read LE Local Supported Features */
42c6b129 312 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
2177bab5 313
747d3f03
MH
314 /* Read LE Supported States */
315 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
316
c73eee91
JH
317 /* LE-only controllers have LE implicitly enabled */
318 if (!lmp_bredr_capable(hdev))
a1536da2 319 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2177bab5
JH
320}
321
42c6b129 322static void hci_setup_event_mask(struct hci_request *req)
2177bab5 323{
42c6b129
JH
324 struct hci_dev *hdev = req->hdev;
325
2177bab5
JH
326 /* The second byte is 0xff instead of 0x9f (two reserved bits
327 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
328 * command otherwise.
329 */
330 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
331
332 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
333 * any event mask for pre 1.2 devices.
334 */
335 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
336 return;
337
338 if (lmp_bredr_capable(hdev)) {
339 events[4] |= 0x01; /* Flow Specification Complete */
c7882cbd
MH
340 } else {
341 /* Use a different default for LE-only devices */
342 memset(events, 0, sizeof(events));
c7882cbd
MH
343 events[1] |= 0x20; /* Command Complete */
344 events[1] |= 0x40; /* Command Status */
345 events[1] |= 0x80; /* Hardware Error */
5c3d3b4c
MH
346
347 /* If the controller supports the Disconnect command, enable
348 * the corresponding event. In addition enable packet flow
349 * control related events.
350 */
351 if (hdev->commands[0] & 0x20) {
352 events[0] |= 0x10; /* Disconnection Complete */
353 events[2] |= 0x04; /* Number of Completed Packets */
354 events[3] |= 0x02; /* Data Buffer Overflow */
355 }
356
357 /* If the controller supports the Read Remote Version
358 * Information command, enable the corresponding event.
359 */
360 if (hdev->commands[2] & 0x80)
361 events[1] |= 0x08; /* Read Remote Version Information
362 * Complete
363 */
0da71f1b
MH
364
365 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
366 events[0] |= 0x80; /* Encryption Change */
367 events[5] |= 0x80; /* Encryption Key Refresh Complete */
368 }
2177bab5
JH
369 }
370
9fe759ce
MH
371 if (lmp_inq_rssi_capable(hdev) ||
372 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
2177bab5
JH
373 events[4] |= 0x02; /* Inquiry Result with RSSI */
374
70f56aa2
MH
375 if (lmp_ext_feat_capable(hdev))
376 events[4] |= 0x04; /* Read Remote Extended Features Complete */
377
378 if (lmp_esco_capable(hdev)) {
379 events[5] |= 0x08; /* Synchronous Connection Complete */
380 events[5] |= 0x10; /* Synchronous Connection Changed */
381 }
382
2177bab5
JH
383 if (lmp_sniffsubr_capable(hdev))
384 events[5] |= 0x20; /* Sniff Subrating */
385
386 if (lmp_pause_enc_capable(hdev))
387 events[5] |= 0x80; /* Encryption Key Refresh Complete */
388
389 if (lmp_ext_inq_capable(hdev))
390 events[5] |= 0x40; /* Extended Inquiry Result */
391
392 if (lmp_no_flush_capable(hdev))
393 events[7] |= 0x01; /* Enhanced Flush Complete */
394
395 if (lmp_lsto_capable(hdev))
396 events[6] |= 0x80; /* Link Supervision Timeout Changed */
397
398 if (lmp_ssp_capable(hdev)) {
399 events[6] |= 0x01; /* IO Capability Request */
400 events[6] |= 0x02; /* IO Capability Response */
401 events[6] |= 0x04; /* User Confirmation Request */
402 events[6] |= 0x08; /* User Passkey Request */
403 events[6] |= 0x10; /* Remote OOB Data Request */
404 events[6] |= 0x20; /* Simple Pairing Complete */
405 events[7] |= 0x04; /* User Passkey Notification */
406 events[7] |= 0x08; /* Keypress Notification */
407 events[7] |= 0x10; /* Remote Host Supported
408 * Features Notification
409 */
410 }
411
412 if (lmp_le_capable(hdev))
413 events[7] |= 0x20; /* LE Meta-Event */
414
42c6b129 415 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
2177bab5
JH
416}
417
a1d01db1 418static int hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 419{
42c6b129
JH
420 struct hci_dev *hdev = req->hdev;
421
0af801b9
JH
422 if (hdev->dev_type == HCI_AMP)
423 return amp_init2(req);
424
2177bab5 425 if (lmp_bredr_capable(hdev))
42c6b129 426 bredr_setup(req);
56f87901 427 else
a358dc11 428 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
2177bab5
JH
429
430 if (lmp_le_capable(hdev))
42c6b129 431 le_setup(req);
2177bab5 432
0f3adeae
MH
433 /* All Bluetooth 1.2 and later controllers should support the
434 * HCI command for reading the local supported commands.
435 *
436 * Unfortunately some controllers indicate Bluetooth 1.2 support,
437 * but do not have support for this command. If that is the case,
438 * the driver can quirk the behavior and skip reading the local
439 * supported commands.
3f8e2d75 440 */
0f3adeae
MH
441 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
442 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
42c6b129 443 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
444
445 if (lmp_ssp_capable(hdev)) {
57af75a8
MH
446 /* When SSP is available, then the host features page
447 * should also be available as well. However some
448 * controllers list the max_page as 0 as long as SSP
449 * has not been enabled. To achieve proper debugging
450 * output, force the minimum max_page to 1 at least.
451 */
452 hdev->max_page = 0x01;
453
d7a5a11d 454 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2177bab5 455 u8 mode = 0x01;
574ea3c7 456
42c6b129
JH
457 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
458 sizeof(mode), &mode);
2177bab5
JH
459 } else {
460 struct hci_cp_write_eir cp;
461
462 memset(hdev->eir, 0, sizeof(hdev->eir));
463 memset(&cp, 0, sizeof(cp));
464
42c6b129 465 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
466 }
467 }
468
043ec9bf
MH
469 if (lmp_inq_rssi_capable(hdev) ||
470 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
04422da9
MH
471 u8 mode;
472
473 /* If Extended Inquiry Result events are supported, then
474 * they are clearly preferred over Inquiry Result with RSSI
475 * events.
476 */
477 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
478
479 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
480 }
2177bab5
JH
481
482 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 483 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
484
485 if (lmp_ext_feat_capable(hdev)) {
486 struct hci_cp_read_local_ext_features cp;
487
488 cp.page = 0x01;
42c6b129
JH
489 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
490 sizeof(cp), &cp);
2177bab5
JH
491 }
492
d7a5a11d 493 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2177bab5 494 u8 enable = 1;
42c6b129
JH
495 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
496 &enable);
2177bab5 497 }
a1d01db1
JH
498
499 return 0;
2177bab5
JH
500}
501
42c6b129 502static void hci_setup_link_policy(struct hci_request *req)
2177bab5 503{
42c6b129 504 struct hci_dev *hdev = req->hdev;
2177bab5
JH
505 struct hci_cp_write_def_link_policy cp;
506 u16 link_policy = 0;
507
508 if (lmp_rswitch_capable(hdev))
509 link_policy |= HCI_LP_RSWITCH;
510 if (lmp_hold_capable(hdev))
511 link_policy |= HCI_LP_HOLD;
512 if (lmp_sniff_capable(hdev))
513 link_policy |= HCI_LP_SNIFF;
514 if (lmp_park_capable(hdev))
515 link_policy |= HCI_LP_PARK;
516
517 cp.policy = cpu_to_le16(link_policy);
42c6b129 518 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
519}
520
42c6b129 521static void hci_set_le_support(struct hci_request *req)
2177bab5 522{
42c6b129 523 struct hci_dev *hdev = req->hdev;
2177bab5
JH
524 struct hci_cp_write_le_host_supported cp;
525
c73eee91
JH
526 /* LE-only devices do not support explicit enablement */
527 if (!lmp_bredr_capable(hdev))
528 return;
529
2177bab5
JH
530 memset(&cp, 0, sizeof(cp));
531
d7a5a11d 532 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2177bab5 533 cp.le = 0x01;
32226e4f 534 cp.simul = 0x00;
2177bab5
JH
535 }
536
537 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
538 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
539 &cp);
2177bab5
JH
540}
541
d62e6d67
JH
542static void hci_set_event_mask_page_2(struct hci_request *req)
543{
544 struct hci_dev *hdev = req->hdev;
545 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
313f6888 546 bool changed = false;
d62e6d67
JH
547
548 /* If Connectionless Slave Broadcast master role is supported
549 * enable all necessary events for it.
550 */
53b834d2 551 if (lmp_csb_master_capable(hdev)) {
d62e6d67
JH
552 events[1] |= 0x40; /* Triggered Clock Capture */
553 events[1] |= 0x80; /* Synchronization Train Complete */
554 events[2] |= 0x10; /* Slave Page Response Timeout */
555 events[2] |= 0x20; /* CSB Channel Map Change */
313f6888 556 changed = true;
d62e6d67
JH
557 }
558
559 /* If Connectionless Slave Broadcast slave role is supported
560 * enable all necessary events for it.
561 */
53b834d2 562 if (lmp_csb_slave_capable(hdev)) {
d62e6d67
JH
563 events[2] |= 0x01; /* Synchronization Train Received */
564 events[2] |= 0x02; /* CSB Receive */
565 events[2] |= 0x04; /* CSB Timeout */
566 events[2] |= 0x08; /* Truncated Page Complete */
313f6888 567 changed = true;
d62e6d67
JH
568 }
569
40c59fcb 570 /* Enable Authenticated Payload Timeout Expired event if supported */
313f6888 571 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
40c59fcb 572 events[2] |= 0x80;
313f6888
MH
573 changed = true;
574 }
40c59fcb 575
313f6888
MH
576 /* Some Broadcom based controllers indicate support for Set Event
577 * Mask Page 2 command, but then actually do not support it. Since
578 * the default value is all bits set to zero, the command is only
579 * required if the event mask has to be changed. In case no change
580 * to the event mask is needed, skip this command.
581 */
582 if (changed)
583 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
584 sizeof(events), events);
d62e6d67
JH
585}
586
a1d01db1 587static int hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 588{
42c6b129 589 struct hci_dev *hdev = req->hdev;
d2c5d77f 590 u8 p;
42c6b129 591
0da71f1b
MH
592 hci_setup_event_mask(req);
593
e81be90b
JH
594 if (hdev->commands[6] & 0x20 &&
595 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
48ce62c4
MH
596 struct hci_cp_read_stored_link_key cp;
597
598 bacpy(&cp.bdaddr, BDADDR_ANY);
599 cp.read_all = 0x01;
600 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
601 }
602
2177bab5 603 if (hdev->commands[5] & 0x10)
42c6b129 604 hci_setup_link_policy(req);
2177bab5 605
417287de
MH
606 if (hdev->commands[8] & 0x01)
607 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
608
cde1a8a9
IFM
609 if (hdev->commands[18] & 0x04 &&
610 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
00bce3fb
AM
611 hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
612
417287de
MH
613 /* Some older Broadcom based Bluetooth 1.2 controllers do not
614 * support the Read Page Scan Type command. Check support for
615 * this command in the bit mask of supported commands.
616 */
617 if (hdev->commands[13] & 0x01)
618 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
619
9193c6e8
AG
620 if (lmp_le_capable(hdev)) {
621 u8 events[8];
622
623 memset(events, 0, sizeof(events));
4d6c705b
MH
624
625 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
626 events[0] |= 0x10; /* LE Long Term Key Request */
662bc2e6
AG
627
628 /* If controller supports the Connection Parameters Request
629 * Link Layer Procedure, enable the corresponding event.
630 */
631 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
632 events[0] |= 0x20; /* LE Remote Connection
633 * Parameter Request
634 */
635
a9f6068e
MH
636 /* If the controller supports the Data Length Extension
637 * feature, enable the corresponding event.
638 */
639 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
640 events[0] |= 0x40; /* LE Data Length Change */
641
ff3b8df2
MH
642 /* If the controller supports LL Privacy feature, enable
643 * the corresponding event.
644 */
645 if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
646 events[1] |= 0x02; /* LE Enhanced Connection
647 * Complete
648 */
649
4b71bba4
MH
650 /* If the controller supports Extended Scanner Filter
651 * Policies, enable the correspondig event.
652 */
653 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
654 events[1] |= 0x04; /* LE Direct Advertising
655 * Report
656 */
657
9756d33b
MH
658 /* If the controller supports Channel Selection Algorithm #2
659 * feature, enable the corresponding event.
660 */
661 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
662 events[2] |= 0x08; /* LE Channel Selection
663 * Algorithm
664 */
665
7d26f5c4
MH
666 /* If the controller supports the LE Set Scan Enable command,
667 * enable the corresponding advertising report event.
668 */
669 if (hdev->commands[26] & 0x08)
670 events[0] |= 0x02; /* LE Advertising Report */
671
672 /* If the controller supports the LE Create Connection
673 * command, enable the corresponding event.
674 */
675 if (hdev->commands[26] & 0x10)
676 events[0] |= 0x01; /* LE Connection Complete */
677
678 /* If the controller supports the LE Connection Update
679 * command, enable the corresponding event.
680 */
681 if (hdev->commands[27] & 0x04)
682 events[0] |= 0x04; /* LE Connection Update
683 * Complete
684 */
685
686 /* If the controller supports the LE Read Remote Used Features
687 * command, enable the corresponding event.
688 */
689 if (hdev->commands[27] & 0x20)
690 events[0] |= 0x08; /* LE Read Remote Used
691 * Features Complete
692 */
693
5a34bd5f
MH
694 /* If the controller supports the LE Read Local P-256
695 * Public Key command, enable the corresponding event.
696 */
697 if (hdev->commands[34] & 0x02)
698 events[0] |= 0x80; /* LE Read Local P-256
699 * Public Key Complete
700 */
701
702 /* If the controller supports the LE Generate DHKey
703 * command, enable the corresponding event.
704 */
705 if (hdev->commands[34] & 0x04)
706 events[1] |= 0x01; /* LE Generate DHKey Complete */
707
27bbca44
MH
708 /* If the controller supports the LE Set Default PHY or
709 * LE Set PHY commands, enable the corresponding event.
710 */
711 if (hdev->commands[35] & (0x20 | 0x40))
712 events[1] |= 0x08; /* LE PHY Update Complete */
713
c215e939
JK
714 /* If the controller supports LE Set Extended Scan Parameters
715 * and LE Set Extended Scan Enable commands, enable the
716 * corresponding event.
717 */
718 if (use_ext_scan(hdev))
719 events[1] |= 0x10; /* LE Extended Advertising
720 * Report
721 */
722
acf0aeae
JK
723 /* If the controller supports the LE Extended Advertising
724 * command, enable the corresponding event.
725 */
726 if (ext_adv_capable(hdev))
727 events[2] |= 0x02; /* LE Advertising Set
728 * Terminated
729 */
730
9193c6e8
AG
731 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
732 events);
733
6b49bcb4
JK
734 /* Read LE Advertising Channel TX Power */
735 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
736 /* HCI TS spec forbids mixing of legacy and extended
737 * advertising commands wherein READ_ADV_TX_POWER is
738 * also included. So do not call it if extended adv
739 * is supported otherwise controller will return
740 * COMMAND_DISALLOWED for extended commands.
741 */
15a49cca
MH
742 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
743 }
744
7c395ea5
DW
745 if (hdev->commands[38] & 0x80) {
746 /* Read LE Min/Max Tx Power*/
747 hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
748 0, NULL);
749 }
750
2ab216a7
MH
751 if (hdev->commands[26] & 0x40) {
752 /* Read LE White List Size */
753 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
754 0, NULL);
755 }
756
757 if (hdev->commands[26] & 0x80) {
758 /* Clear LE White List */
759 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
760 }
761
cfdb0c2d
AN
762 if (hdev->commands[34] & 0x40) {
763 /* Read LE Resolving List Size */
764 hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
765 0, NULL);
766 }
767
545f2596
AN
768 if (hdev->commands[34] & 0x20) {
769 /* Clear LE Resolving List */
770 hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
771 }
772
a31489d2 773 if (hdev->commands[35] & 0x04) {
b2cc2339
SN
774 __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
775
776 /* Set RPA timeout */
777 hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
778 &rpa_timeout);
779 }
780
a9f6068e
MH
781 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
782 /* Read LE Maximum Data Length */
783 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
784
785 /* Read LE Suggested Default Data Length */
786 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
787 }
788
6b49bcb4
JK
789 if (ext_adv_capable(hdev)) {
790 /* Read LE Number of Supported Advertising Sets */
791 hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
792 0, NULL);
793 }
794
42c6b129 795 hci_set_le_support(req);
9193c6e8 796 }
d2c5d77f
JH
797
798 /* Read features beyond page 1 if available */
799 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
800 struct hci_cp_read_local_ext_features cp;
801
802 cp.page = p;
803 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
804 sizeof(cp), &cp);
805 }
a1d01db1
JH
806
807 return 0;
2177bab5
JH
808}
809
a1d01db1 810static int hci_init4_req(struct hci_request *req, unsigned long opt)
5d4e7e8d
JH
811{
812 struct hci_dev *hdev = req->hdev;
813
36f260ce
MH
814 /* Some Broadcom based Bluetooth controllers do not support the
815 * Delete Stored Link Key command. They are clearly indicating its
816 * absence in the bit mask of supported commands.
817 *
bb6d6895 818 * Check the supported commands and only if the command is marked
36f260ce
MH
819 * as supported send it. If not supported assume that the controller
820 * does not have actual support for stored link keys which makes this
821 * command redundant anyway.
822 *
823 * Some controllers indicate that they support handling deleting
824 * stored link keys, but they don't. The quirk lets a driver
825 * just disable this command.
826 */
827 if (hdev->commands[6] & 0x80 &&
828 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
829 struct hci_cp_delete_stored_link_key cp;
830
831 bacpy(&cp.bdaddr, BDADDR_ANY);
832 cp.delete_all = 0x01;
833 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
834 sizeof(cp), &cp);
835 }
836
d62e6d67
JH
837 /* Set event mask page 2 if the HCI command for it is supported */
838 if (hdev->commands[22] & 0x04)
839 hci_set_event_mask_page_2(req);
840
109e3191
MH
841 /* Read local codec list if the HCI command is supported */
842 if (hdev->commands[29] & 0x20)
843 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
844
a4790360
MH
845 /* Read local pairing options if the HCI command is supported */
846 if (hdev->commands[41] & 0x08)
847 hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);
848
f4fe73ed
MH
849 /* Get MWS transport configuration if the HCI command is supported */
850 if (hdev->commands[30] & 0x08)
851 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
852
5d4e7e8d 853 /* Check for Synchronization Train support */
53b834d2 854 if (lmp_sync_train_capable(hdev))
5d4e7e8d 855 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
a6d0d690
MH
856
857 /* Enable Secure Connections if supported and configured */
d7a5a11d 858 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
574ea3c7 859 bredr_sc_enabled(hdev)) {
a6d0d690 860 u8 support = 0x01;
574ea3c7 861
a6d0d690
MH
862 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
863 sizeof(support), &support);
864 }
a1d01db1 865
00bce3fb
AM
866 /* Set erroneous data reporting if supported to the wideband speech
867 * setting value
868 */
cde1a8a9
IFM
869 if (hdev->commands[18] & 0x08 &&
870 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
00bce3fb
AM
871 bool enabled = hci_dev_test_flag(hdev,
872 HCI_WIDEBAND_SPEECH_ENABLED);
873
874 if (enabled !=
875 (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
876 struct hci_cp_write_def_err_data_reporting cp;
877
878 cp.err_data_reporting = enabled ?
879 ERR_DATA_REPORTING_ENABLED :
880 ERR_DATA_REPORTING_DISABLED;
881
882 hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
883 sizeof(cp), &cp);
884 }
885 }
886
12204875
MH
887 /* Set Suggested Default Data Length to maximum if supported */
888 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
889 struct hci_cp_le_write_def_data_len cp;
890
727ea61a
BDC
891 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
892 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
12204875
MH
893 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
894 }
895
de2ba303
MH
896 /* Set Default PHY parameters if command is supported */
897 if (hdev->commands[35] & 0x20) {
898 struct hci_cp_le_set_default_phy cp;
899
6decb5b4
JK
900 cp.all_phys = 0x00;
901 cp.tx_phys = hdev->le_tx_def_phys;
902 cp.rx_phys = hdev->le_rx_def_phys;
de2ba303
MH
903
904 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
905 }
906
a1d01db1 907 return 0;
5d4e7e8d
JH
908}
909
2177bab5
JH
910static int __hci_init(struct hci_dev *hdev)
911{
912 int err;
913
4ebeee2d 914 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
2177bab5
JH
915 if (err < 0)
916 return err;
917
f640ee98
MH
918 if (hci_dev_test_flag(hdev, HCI_SETUP))
919 hci_debugfs_create_basic(hdev);
4b4148e9 920
4ebeee2d 921 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
0af801b9
JH
922 if (err < 0)
923 return err;
924
ca8bee5d 925 /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
2177bab5 926 * BR/EDR/LE type controllers. AMP controllers only need the
0af801b9 927 * first two stages of init.
2177bab5 928 */
ca8bee5d 929 if (hdev->dev_type != HCI_PRIMARY)
2177bab5
JH
930 return 0;
931
4ebeee2d 932 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
5d4e7e8d
JH
933 if (err < 0)
934 return err;
935
4ebeee2d 936 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
baf27f6e
MH
937 if (err < 0)
938 return err;
939
ec6cef9c
MH
940 /* This function is only called when the controller is actually in
941 * configured state. When the controller is marked as unconfigured,
942 * this initialization procedure is not run.
943 *
944 * It means that it is possible that a controller runs through its
945 * setup phase and then discovers missing settings. If that is the
946 * case, then this function will not be called. It then will only
947 * be called during the config phase.
948 *
949 * So only when in setup phase or config phase, create the debugfs
950 * entries and register the SMP channels.
baf27f6e 951 */
d7a5a11d
MH
952 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
953 !hci_dev_test_flag(hdev, HCI_CONFIG))
baf27f6e
MH
954 return 0;
955
60c5f5fb
MH
956 hci_debugfs_create_common(hdev);
957
71c3b60e 958 if (lmp_bredr_capable(hdev))
60c5f5fb 959 hci_debugfs_create_bredr(hdev);
2bfa3531 960
162a3bac 961 if (lmp_le_capable(hdev))
60c5f5fb 962 hci_debugfs_create_le(hdev);
e7b8fc92 963
baf27f6e 964 return 0;
2177bab5
JH
965}
966
a1d01db1 967static int hci_init0_req(struct hci_request *req, unsigned long opt)
0ebca7d6
MH
968{
969 struct hci_dev *hdev = req->hdev;
970
971 BT_DBG("%s %ld", hdev->name, opt);
972
973 /* Reset */
974 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
975 hci_reset_req(req, 0);
976
977 /* Read Local Version */
978 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
979
980 /* Read BD Address */
981 if (hdev->set_bdaddr)
982 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
a1d01db1
JH
983
984 return 0;
0ebca7d6
MH
985}
986
987static int __hci_unconf_init(struct hci_dev *hdev)
988{
989 int err;
990
cc78b44b
MH
991 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
992 return 0;
993
4ebeee2d 994 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
0ebca7d6
MH
995 if (err < 0)
996 return err;
997
f640ee98
MH
998 if (hci_dev_test_flag(hdev, HCI_SETUP))
999 hci_debugfs_create_basic(hdev);
1000
0ebca7d6
MH
1001 return 0;
1002}
1003
a1d01db1 1004static int hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1005{
1006 __u8 scan = opt;
1007
42c6b129 1008 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1009
1010 /* Inquiry and Page scans */
42c6b129 1011 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
a1d01db1 1012 return 0;
1da177e4
LT
1013}
1014
a1d01db1 1015static int hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1016{
1017 __u8 auth = opt;
1018
42c6b129 1019 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1020
1021 /* Authentication */
42c6b129 1022 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
a1d01db1 1023 return 0;
1da177e4
LT
1024}
1025
a1d01db1 1026static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1027{
1028 __u8 encrypt = opt;
1029
42c6b129 1030 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1031
e4e8e37c 1032 /* Encryption */
42c6b129 1033 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
a1d01db1 1034 return 0;
1da177e4
LT
1035}
1036
a1d01db1 1037static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1038{
1039 __le16 policy = cpu_to_le16(opt);
1040
42c6b129 1041 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1042
1043 /* Default link policy */
42c6b129 1044 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
a1d01db1 1045 return 0;
e4e8e37c
MH
1046}
1047
8e87d142 1048/* Get HCI device by index.
1da177e4
LT
1049 * Device is held on return. */
1050struct hci_dev *hci_dev_get(int index)
1051{
8035ded4 1052 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1053
1054 BT_DBG("%d", index);
1055
1056 if (index < 0)
1057 return NULL;
1058
1059 read_lock(&hci_dev_list_lock);
8035ded4 1060 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1061 if (d->id == index) {
1062 hdev = hci_dev_hold(d);
1063 break;
1064 }
1065 }
1066 read_unlock(&hci_dev_list_lock);
1067 return hdev;
1068}
1da177e4
LT
1069
1070/* ---- Inquiry support ---- */
ff9ef578 1071
30dc78e1
JH
1072bool hci_discovery_active(struct hci_dev *hdev)
1073{
1074 struct discovery_state *discov = &hdev->discovery;
1075
6fbe195d 1076 switch (discov->state) {
343f935b 1077 case DISCOVERY_FINDING:
6fbe195d 1078 case DISCOVERY_RESOLVING:
30dc78e1
JH
1079 return true;
1080
6fbe195d
AG
1081 default:
1082 return false;
1083 }
30dc78e1
JH
1084}
1085
ff9ef578
JH
1086void hci_discovery_set_state(struct hci_dev *hdev, int state)
1087{
bb3e0a33
JH
1088 int old_state = hdev->discovery.state;
1089
ff9ef578
JH
1090 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1091
bb3e0a33 1092 if (old_state == state)
ff9ef578
JH
1093 return;
1094
bb3e0a33
JH
1095 hdev->discovery.state = state;
1096
ff9ef578
JH
1097 switch (state) {
1098 case DISCOVERY_STOPPED:
c54c3860
AG
1099 hci_update_background_scan(hdev);
1100
bb3e0a33 1101 if (old_state != DISCOVERY_STARTING)
7b99b659 1102 mgmt_discovering(hdev, 0);
ff9ef578
JH
1103 break;
1104 case DISCOVERY_STARTING:
1105 break;
343f935b 1106 case DISCOVERY_FINDING:
ff9ef578
JH
1107 mgmt_discovering(hdev, 1);
1108 break;
30dc78e1
JH
1109 case DISCOVERY_RESOLVING:
1110 break;
ff9ef578
JH
1111 case DISCOVERY_STOPPING:
1112 break;
1113 }
ff9ef578
JH
1114}
1115
1f9b9a5d 1116void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1117{
30883512 1118 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1119 struct inquiry_entry *p, *n;
1da177e4 1120
561aafbc
JH
1121 list_for_each_entry_safe(p, n, &cache->all, all) {
1122 list_del(&p->all);
b57c1a56 1123 kfree(p);
1da177e4 1124 }
561aafbc
JH
1125
1126 INIT_LIST_HEAD(&cache->unknown);
1127 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1128}
1129
a8c5fb1a
GP
1130struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1131 bdaddr_t *bdaddr)
1da177e4 1132{
30883512 1133 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1134 struct inquiry_entry *e;
1135
6ed93dc6 1136 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1137
561aafbc
JH
1138 list_for_each_entry(e, &cache->all, all) {
1139 if (!bacmp(&e->data.bdaddr, bdaddr))
1140 return e;
1141 }
1142
1143 return NULL;
1144}
1145
1146struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1147 bdaddr_t *bdaddr)
561aafbc 1148{
30883512 1149 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1150 struct inquiry_entry *e;
1151
6ed93dc6 1152 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1153
1154 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1155 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1156 return e;
1157 }
1158
1159 return NULL;
1da177e4
LT
1160}
1161
30dc78e1 1162struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1163 bdaddr_t *bdaddr,
1164 int state)
30dc78e1
JH
1165{
1166 struct discovery_state *cache = &hdev->discovery;
1167 struct inquiry_entry *e;
1168
6ed93dc6 1169 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1170
1171 list_for_each_entry(e, &cache->resolve, list) {
1172 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1173 return e;
1174 if (!bacmp(&e->data.bdaddr, bdaddr))
1175 return e;
1176 }
1177
1178 return NULL;
1179}
1180
a3d4e20a 1181void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 1182 struct inquiry_entry *ie)
a3d4e20a
JH
1183{
1184 struct discovery_state *cache = &hdev->discovery;
1185 struct list_head *pos = &cache->resolve;
1186 struct inquiry_entry *p;
1187
1188 list_del(&ie->list);
1189
1190 list_for_each_entry(p, &cache->resolve, list) {
1191 if (p->name_state != NAME_PENDING &&
a8c5fb1a 1192 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
1193 break;
1194 pos = &p->list;
1195 }
1196
1197 list_add(&ie->list, pos);
1198}
1199
af58925c
MH
1200u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1201 bool name_known)
1da177e4 1202{
30883512 1203 struct discovery_state *cache = &hdev->discovery;
70f23020 1204 struct inquiry_entry *ie;
af58925c 1205 u32 flags = 0;
1da177e4 1206
6ed93dc6 1207 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 1208
6928a924 1209 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
2b2fec4d 1210
af58925c
MH
1211 if (!data->ssp_mode)
1212 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 1213
70f23020 1214 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 1215 if (ie) {
af58925c
MH
1216 if (!ie->data.ssp_mode)
1217 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 1218
a3d4e20a 1219 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 1220 data->rssi != ie->data.rssi) {
a3d4e20a
JH
1221 ie->data.rssi = data->rssi;
1222 hci_inquiry_cache_update_resolve(hdev, ie);
1223 }
1224
561aafbc 1225 goto update;
a3d4e20a 1226 }
561aafbc
JH
1227
1228 /* Entry not in the cache. Add new one. */
27f70f3e 1229 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
af58925c
MH
1230 if (!ie) {
1231 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1232 goto done;
1233 }
561aafbc
JH
1234
1235 list_add(&ie->all, &cache->all);
1236
1237 if (name_known) {
1238 ie->name_state = NAME_KNOWN;
1239 } else {
1240 ie->name_state = NAME_NOT_KNOWN;
1241 list_add(&ie->list, &cache->unknown);
1242 }
70f23020 1243
561aafbc
JH
1244update:
1245 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 1246 ie->name_state != NAME_PENDING) {
561aafbc
JH
1247 ie->name_state = NAME_KNOWN;
1248 list_del(&ie->list);
1da177e4
LT
1249 }
1250
70f23020
AE
1251 memcpy(&ie->data, data, sizeof(*data));
1252 ie->timestamp = jiffies;
1da177e4 1253 cache->timestamp = jiffies;
3175405b
JH
1254
1255 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 1256 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 1257
af58925c
MH
1258done:
1259 return flags;
1da177e4
LT
1260}
1261
1262static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1263{
30883512 1264 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1265 struct inquiry_info *info = (struct inquiry_info *) buf;
1266 struct inquiry_entry *e;
1267 int copied = 0;
1268
561aafbc 1269 list_for_each_entry(e, &cache->all, all) {
1da177e4 1270 struct inquiry_data *data = &e->data;
b57c1a56
JH
1271
1272 if (copied >= num)
1273 break;
1274
1da177e4
LT
1275 bacpy(&info->bdaddr, &data->bdaddr);
1276 info->pscan_rep_mode = data->pscan_rep_mode;
1277 info->pscan_period_mode = data->pscan_period_mode;
1278 info->pscan_mode = data->pscan_mode;
1279 memcpy(info->dev_class, data->dev_class, 3);
1280 info->clock_offset = data->clock_offset;
b57c1a56 1281
1da177e4 1282 info++;
b57c1a56 1283 copied++;
1da177e4
LT
1284 }
1285
1286 BT_DBG("cache %p, copied %d", cache, copied);
1287 return copied;
1288}
1289
a1d01db1 1290static int hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1291{
1292 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 1293 struct hci_dev *hdev = req->hdev;
1da177e4
LT
1294 struct hci_cp_inquiry cp;
1295
1296 BT_DBG("%s", hdev->name);
1297
1298 if (test_bit(HCI_INQUIRY, &hdev->flags))
a1d01db1 1299 return 0;
1da177e4
LT
1300
1301 /* Start Inquiry */
1302 memcpy(&cp.lap, &ir->lap, 3);
1303 cp.length = ir->length;
1304 cp.num_rsp = ir->num_rsp;
42c6b129 1305 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
a1d01db1
JH
1306
1307 return 0;
1da177e4
LT
1308}
1309
1310int hci_inquiry(void __user *arg)
1311{
1312 __u8 __user *ptr = arg;
1313 struct hci_inquiry_req ir;
1314 struct hci_dev *hdev;
1315 int err = 0, do_inquiry = 0, max_rsp;
1316 long timeo;
1317 __u8 *buf;
1318
1319 if (copy_from_user(&ir, ptr, sizeof(ir)))
1320 return -EFAULT;
1321
5a08ecce
AE
1322 hdev = hci_dev_get(ir.dev_id);
1323 if (!hdev)
1da177e4
LT
1324 return -ENODEV;
1325
d7a5a11d 1326 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1327 err = -EBUSY;
1328 goto done;
1329 }
1330
d7a5a11d 1331 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1332 err = -EOPNOTSUPP;
1333 goto done;
1334 }
1335
ca8bee5d 1336 if (hdev->dev_type != HCI_PRIMARY) {
5b69bef5
MH
1337 err = -EOPNOTSUPP;
1338 goto done;
1339 }
1340
d7a5a11d 1341 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
1342 err = -EOPNOTSUPP;
1343 goto done;
1344 }
1345
09fd0de5 1346 hci_dev_lock(hdev);
8e87d142 1347 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1348 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1349 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1350 do_inquiry = 1;
1351 }
09fd0de5 1352 hci_dev_unlock(hdev);
1da177e4 1353
04837f64 1354 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1355
1356 if (do_inquiry) {
01178cd4 1357 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
4ebeee2d 1358 timeo, NULL);
70f23020
AE
1359 if (err < 0)
1360 goto done;
3e13fa1e
AG
1361
1362 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1363 * cleared). If it is interrupted by a signal, return -EINTR.
1364 */
74316201 1365 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
28a758c8
PB
1366 TASK_INTERRUPTIBLE)) {
1367 err = -EINTR;
1368 goto done;
1369 }
70f23020 1370 }
1da177e4 1371
8fc9ced3
GP
1372 /* for unlimited number of responses we will use buffer with
1373 * 255 entries
1374 */
1da177e4
LT
1375 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1376
1377 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1378 * copy it to the user space.
1379 */
6da2ec56 1380 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
70f23020 1381 if (!buf) {
1da177e4
LT
1382 err = -ENOMEM;
1383 goto done;
1384 }
1385
09fd0de5 1386 hci_dev_lock(hdev);
1da177e4 1387 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1388 hci_dev_unlock(hdev);
1da177e4
LT
1389
1390 BT_DBG("num_rsp %d", ir.num_rsp);
1391
1392 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1393 ptr += sizeof(ir);
1394 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1395 ir.num_rsp))
1da177e4 1396 err = -EFAULT;
8e87d142 1397 } else
1da177e4
LT
1398 err = -EFAULT;
1399
1400 kfree(buf);
1401
1402done:
1403 hci_dev_put(hdev);
1404 return err;
1405}
1406
7a0e5b15
MK
1407/**
1408 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1409 * (BD_ADDR) for a HCI device from
1410 * a firmware node property.
1411 * @hdev: The HCI device
1412 *
1413 * Search the firmware node for 'local-bd-address'.
1414 *
1415 * All-zero BD addresses are rejected, because those could be properties
1416 * that exist in the firmware tables, but were not updated by the firmware. For
1417 * example, the DTS could define 'local-bd-address', with zero BD addresses.
1418 */
1419static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1420{
1421 struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1422 bdaddr_t ba;
1423 int ret;
1424
1425 ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1426 (u8 *)&ba, sizeof(ba));
1427 if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1428 return;
1429
1430 bacpy(&hdev->public_addr, &ba);
1431}
1432
cbed0ca1 1433static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 1434{
1da177e4
LT
1435 int ret = 0;
1436
1da177e4
LT
1437 BT_DBG("%s %p", hdev->name, hdev);
1438
b504430c 1439 hci_req_sync_lock(hdev);
1da177e4 1440
d7a5a11d 1441 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
94324962
JH
1442 ret = -ENODEV;
1443 goto done;
1444 }
1445
d7a5a11d
MH
1446 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1447 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
a5c8f270
MH
1448 /* Check for rfkill but allow the HCI setup stage to
1449 * proceed (which in itself doesn't cause any RF activity).
1450 */
d7a5a11d 1451 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
a5c8f270
MH
1452 ret = -ERFKILL;
1453 goto done;
1454 }
1455
1456 /* Check for valid public address or a configured static
1457 * random adddress, but let the HCI setup proceed to
1458 * be able to determine if there is a public address
1459 * or not.
1460 *
c6beca0e
MH
1461 * In case of user channel usage, it is not important
1462 * if a public address or static random address is
1463 * available.
1464 *
a5c8f270
MH
1465 * This check is only valid for BR/EDR controllers
1466 * since AMP controllers do not have an address.
1467 */
d7a5a11d 1468 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
ca8bee5d 1469 hdev->dev_type == HCI_PRIMARY &&
a5c8f270
MH
1470 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1471 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1472 ret = -EADDRNOTAVAIL;
1473 goto done;
1474 }
611b30f7
MH
1475 }
1476
1da177e4
LT
1477 if (test_bit(HCI_UP, &hdev->flags)) {
1478 ret = -EALREADY;
1479 goto done;
1480 }
1481
1da177e4
LT
1482 if (hdev->open(hdev)) {
1483 ret = -EIO;
1484 goto done;
1485 }
1486
e9ca8bf1 1487 set_bit(HCI_RUNNING, &hdev->flags);
05fcd4c4 1488 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
4a3f95b7 1489
f41c70c4
MH
1490 atomic_set(&hdev->cmd_cnt, 1);
1491 set_bit(HCI_INIT, &hdev->flags);
1492
740011cf
SW
1493 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1494 test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
7fdf6c6a
MH
1495 bool invalid_bdaddr;
1496
e131d74a
MH
1497 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1498
af202f84
MH
1499 if (hdev->setup)
1500 ret = hdev->setup(hdev);
f41c70c4 1501
7fdf6c6a
MH
1502 /* The transport driver can set the quirk to mark the
1503 * BD_ADDR invalid before creating the HCI device or in
1504 * its setup callback.
1505 */
1506 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1507 &hdev->quirks);
1508
7a0e5b15
MK
1509 if (ret)
1510 goto setup_failed;
1511
1512 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1513 if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1514 hci_dev_get_bd_addr_from_property(hdev);
1515
1516 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
7fdf6c6a 1517 hdev->set_bdaddr) {
7a0e5b15
MK
1518 ret = hdev->set_bdaddr(hdev,
1519 &hdev->public_addr);
7fdf6c6a
MH
1520
1521 /* If setting of the BD_ADDR from the device
1522 * property succeeds, then treat the address
1523 * as valid even if the invalid BD_ADDR
1524 * quirk indicates otherwise.
1525 */
1526 if (!ret)
1527 invalid_bdaddr = false;
1528 }
7a0e5b15
MK
1529 }
1530
1531setup_failed:
af202f84
MH
1532 /* The transport driver can set these quirks before
1533 * creating the HCI device or in its setup callback.
1534 *
7fdf6c6a
MH
1535 * For the invalid BD_ADDR quirk it is possible that
1536 * it becomes a valid address if the bootloader does
1537 * provide it (see above).
1538 *
af202f84
MH
1539 * In case any of them is set, the controller has to
1540 * start up as unconfigured.
1541 */
eb1904f4 1542 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
7fdf6c6a 1543 invalid_bdaddr)
a1536da2 1544 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
f41c70c4 1545
0ebca7d6
MH
1546 /* For an unconfigured controller it is required to
1547 * read at least the version information provided by
1548 * the Read Local Version Information command.
1549 *
1550 * If the set_bdaddr driver callback is provided, then
1551 * also the original Bluetooth public device address
1552 * will be read using the Read BD Address command.
1553 */
d7a5a11d 1554 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
0ebca7d6 1555 ret = __hci_unconf_init(hdev);
89bc22d2
MH
1556 }
1557
d7a5a11d 1558 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
9713c17b
MH
1559 /* If public address change is configured, ensure that
1560 * the address gets programmed. If the driver does not
1561 * support changing the public address, fail the power
1562 * on procedure.
1563 */
1564 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1565 hdev->set_bdaddr)
24c457e2
MH
1566 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1567 else
1568 ret = -EADDRNOTAVAIL;
1569 }
1570
f41c70c4 1571 if (!ret) {
d7a5a11d 1572 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
98a63aaf 1573 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
f41c70c4 1574 ret = __hci_init(hdev);
98a63aaf
MH
1575 if (!ret && hdev->post_init)
1576 ret = hdev->post_init(hdev);
1577 }
1da177e4
LT
1578 }
1579
7e995b9e
MH
1580 /* If the HCI Reset command is clearing all diagnostic settings,
1581 * then they need to be reprogrammed after the init procedure
1582 * completed.
1583 */
1584 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
b56c7b25 1585 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
7e995b9e
MH
1586 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1587 ret = hdev->set_diag(hdev, true);
1588
145373cb 1589 msft_do_open(hdev);
f67743f9 1590 aosp_do_open(hdev);
145373cb 1591
f41c70c4
MH
1592 clear_bit(HCI_INIT, &hdev->flags);
1593
1da177e4
LT
1594 if (!ret) {
1595 hci_dev_hold(hdev);
a1536da2 1596 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
a73c046a 1597 hci_adv_instances_set_rpa_expired(hdev, true);
1da177e4 1598 set_bit(HCI_UP, &hdev->flags);
05fcd4c4 1599 hci_sock_dev_event(hdev, HCI_DEV_UP);
6d5d2ee6 1600 hci_leds_update_powered(hdev, true);
d7a5a11d
MH
1601 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1602 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1603 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1604 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
2ff13894 1605 hci_dev_test_flag(hdev, HCI_MGMT) &&
ca8bee5d 1606 hdev->dev_type == HCI_PRIMARY) {
2ff13894
JH
1607 ret = __hci_req_hci_power_on(hdev);
1608 mgmt_power_on(hdev, ret);
56e5cb86 1609 }
8e87d142 1610 } else {
1da177e4 1611 /* Init failed, cleanup */
3eff45ea 1612 flush_work(&hdev->tx_work);
c347b765 1613 flush_work(&hdev->cmd_work);
b78752cc 1614 flush_work(&hdev->rx_work);
1da177e4
LT
1615
1616 skb_queue_purge(&hdev->cmd_q);
1617 skb_queue_purge(&hdev->rx_q);
1618
1619 if (hdev->flush)
1620 hdev->flush(hdev);
1621
1622 if (hdev->sent_cmd) {
1623 kfree_skb(hdev->sent_cmd);
1624 hdev->sent_cmd = NULL;
1625 }
1626
e9ca8bf1 1627 clear_bit(HCI_RUNNING, &hdev->flags);
05fcd4c4 1628 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
4a3f95b7 1629
1da177e4 1630 hdev->close(hdev);
fee746b0 1631 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
1632 }
1633
1634done:
b504430c 1635 hci_req_sync_unlock(hdev);
1da177e4
LT
1636 return ret;
1637}
1638
cbed0ca1
JH
1639/* ---- HCI ioctl helpers ---- */
1640
1641int hci_dev_open(__u16 dev)
1642{
1643 struct hci_dev *hdev;
1644 int err;
1645
1646 hdev = hci_dev_get(dev);
1647 if (!hdev)
1648 return -ENODEV;
1649
4a964404 1650 /* Devices that are marked as unconfigured can only be powered
fee746b0
MH
1651 * up as user channel. Trying to bring them up as normal devices
1652 * will result into a failure. Only user channel operation is
1653 * possible.
1654 *
1655 * When this function is called for a user channel, the flag
1656 * HCI_USER_CHANNEL will be set first before attempting to
1657 * open the device.
1658 */
d7a5a11d
MH
1659 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1660 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
fee746b0
MH
1661 err = -EOPNOTSUPP;
1662 goto done;
1663 }
1664
e1d08f40
JH
1665 /* We need to ensure that no other power on/off work is pending
1666 * before proceeding to call hci_dev_do_open. This is
1667 * particularly important if the setup procedure has not yet
1668 * completed.
1669 */
a69d8927 1670 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
e1d08f40
JH
1671 cancel_delayed_work(&hdev->power_off);
1672
a5c8f270
MH
1673 /* After this call it is guaranteed that the setup procedure
1674 * has finished. This means that error conditions like RFKILL
1675 * or no valid public or static random address apply.
1676 */
e1d08f40
JH
1677 flush_workqueue(hdev->req_workqueue);
1678
12aa4f0a 1679 /* For controllers not using the management interface and that
b6ae8457 1680 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
12aa4f0a
MH
1681 * so that pairing works for them. Once the management interface
1682 * is in use this bit will be cleared again and userspace has
1683 * to explicitly enable it.
1684 */
d7a5a11d
MH
1685 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1686 !hci_dev_test_flag(hdev, HCI_MGMT))
a1536da2 1687 hci_dev_set_flag(hdev, HCI_BONDABLE);
12aa4f0a 1688
cbed0ca1
JH
1689 err = hci_dev_do_open(hdev);
1690
fee746b0 1691done:
cbed0ca1 1692 hci_dev_put(hdev);
cbed0ca1
JH
1693 return err;
1694}
1695
d7347f3c
JH
1696/* This function requires the caller holds hdev->lock */
1697static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1698{
1699 struct hci_conn_params *p;
1700
f161dd41
JH
1701 list_for_each_entry(p, &hdev->le_conn_params, list) {
1702 if (p->conn) {
1703 hci_conn_drop(p->conn);
f8aaf9b6 1704 hci_conn_put(p->conn);
f161dd41
JH
1705 p->conn = NULL;
1706 }
d7347f3c 1707 list_del_init(&p->action);
f161dd41 1708 }
d7347f3c
JH
1709
1710 BT_DBG("All LE pending actions cleared");
1711}
1712
6b3cc1db 1713int hci_dev_do_close(struct hci_dev *hdev)
1da177e4 1714{
acc649c6
MH
1715 bool auto_off;
1716
1da177e4
LT
1717 BT_DBG("%s %p", hdev->name, hdev);
1718
d24d8144 1719 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
867146a0 1720 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
d24d8144 1721 test_bit(HCI_UP, &hdev->flags)) {
a44fecbd
THJA
1722 /* Execute vendor specific shutdown routine */
1723 if (hdev->shutdown)
1724 hdev->shutdown(hdev);
1725 }
1726
78c04c0b
VCG
1727 cancel_delayed_work(&hdev->power_off);
1728
7df0f73e 1729 hci_request_cancel_all(hdev);
b504430c 1730 hci_req_sync_lock(hdev);
1da177e4
LT
1731
1732 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 1733 cancel_delayed_work_sync(&hdev->cmd_timer);
b504430c 1734 hci_req_sync_unlock(hdev);
1da177e4
LT
1735 return 0;
1736 }
1737
6d5d2ee6
HK
1738 hci_leds_update_powered(hdev, false);
1739
3eff45ea
GP
1740 /* Flush RX and TX works */
1741 flush_work(&hdev->tx_work);
b78752cc 1742 flush_work(&hdev->rx_work);
1da177e4 1743
16ab91ab 1744 if (hdev->discov_timeout > 0) {
16ab91ab 1745 hdev->discov_timeout = 0;
a358dc11
MH
1746 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1747 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
16ab91ab
JH
1748 }
1749
a69d8927 1750 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
7d78525d
JH
1751 cancel_delayed_work(&hdev->service_cache);
1752
a73c046a
JK
1753 if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1754 struct adv_info *adv_instance;
1755
4518bb0f 1756 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 1757
a73c046a
JK
1758 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1759 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1760 }
1761
76727c02
JH
1762 /* Avoid potential lockdep warnings from the *_flush() calls by
1763 * ensuring the workqueue is empty up front.
1764 */
1765 drain_workqueue(hdev->workqueue);
1766
09fd0de5 1767 hci_dev_lock(hdev);
1aeb9c65 1768
8f502f84
JH
1769 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1770
acc649c6
MH
1771 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1772
ca8bee5d 1773 if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
baab7932 1774 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
2ff13894
JH
1775 hci_dev_test_flag(hdev, HCI_MGMT))
1776 __mgmt_power_off(hdev);
1aeb9c65 1777
1f9b9a5d 1778 hci_inquiry_cache_flush(hdev);
d7347f3c 1779 hci_pend_le_actions_clear(hdev);
f161dd41 1780 hci_conn_hash_flush(hdev);
09fd0de5 1781 hci_dev_unlock(hdev);
1da177e4 1782
64dae967
MH
1783 smp_unregister(hdev);
1784
05fcd4c4 1785 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1da177e4 1786
f67743f9 1787 aosp_do_close(hdev);
145373cb
MC
1788 msft_do_close(hdev);
1789
1da177e4
LT
1790 if (hdev->flush)
1791 hdev->flush(hdev);
1792
1793 /* Reset device */
1794 skb_queue_purge(&hdev->cmd_q);
1795 atomic_set(&hdev->cmd_cnt, 1);
acc649c6
MH
1796 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1797 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1da177e4 1798 set_bit(HCI_INIT, &hdev->flags);
4ebeee2d 1799 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1da177e4
LT
1800 clear_bit(HCI_INIT, &hdev->flags);
1801 }
1802
c347b765
GP
1803 /* flush cmd work */
1804 flush_work(&hdev->cmd_work);
1da177e4
LT
1805
1806 /* Drop queues */
1807 skb_queue_purge(&hdev->rx_q);
1808 skb_queue_purge(&hdev->cmd_q);
1809 skb_queue_purge(&hdev->raw_q);
1810
1811 /* Drop last sent command */
1812 if (hdev->sent_cmd) {
65cc2b49 1813 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
1814 kfree_skb(hdev->sent_cmd);
1815 hdev->sent_cmd = NULL;
1816 }
1817
e9ca8bf1 1818 clear_bit(HCI_RUNNING, &hdev->flags);
05fcd4c4 1819 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
4a3f95b7 1820
9952d90e
APS
1821 if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1822 wake_up(&hdev->suspend_wait_q);
1823
1da177e4
LT
1824 /* After this point our queues are empty
1825 * and no tasks are scheduled. */
1826 hdev->close(hdev);
1827
35b973c9 1828 /* Clear flags */
fee746b0 1829 hdev->flags &= BIT(HCI_RAW);
eacb44df 1830 hci_dev_clear_volatile_flags(hdev);
35b973c9 1831
ced5c338 1832 /* Controller radio is available but is currently powered down */
536619e8 1833 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 1834
e59fda8d 1835 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 1836 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 1837 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 1838
b504430c 1839 hci_req_sync_unlock(hdev);
1da177e4
LT
1840
1841 hci_dev_put(hdev);
1842 return 0;
1843}
1844
1845int hci_dev_close(__u16 dev)
1846{
1847 struct hci_dev *hdev;
1848 int err;
1849
70f23020
AE
1850 hdev = hci_dev_get(dev);
1851 if (!hdev)
1da177e4 1852 return -ENODEV;
8ee56540 1853
d7a5a11d 1854 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1855 err = -EBUSY;
1856 goto done;
1857 }
1858
a69d8927 1859 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
8ee56540
MH
1860 cancel_delayed_work(&hdev->power_off);
1861
1da177e4 1862 err = hci_dev_do_close(hdev);
8ee56540 1863
0736cfa8 1864done:
1da177e4
LT
1865 hci_dev_put(hdev);
1866 return err;
1867}
1868
5c912495 1869static int hci_dev_do_reset(struct hci_dev *hdev)
1da177e4 1870{
5c912495 1871 int ret;
1da177e4 1872
5c912495 1873 BT_DBG("%s %p", hdev->name, hdev);
1da177e4 1874
b504430c 1875 hci_req_sync_lock(hdev);
1da177e4 1876
1da177e4
LT
1877 /* Drop queues */
1878 skb_queue_purge(&hdev->rx_q);
1879 skb_queue_purge(&hdev->cmd_q);
1880
76727c02
JH
1881 /* Avoid potential lockdep warnings from the *_flush() calls by
1882 * ensuring the workqueue is empty up front.
1883 */
1884 drain_workqueue(hdev->workqueue);
1885
09fd0de5 1886 hci_dev_lock(hdev);
1f9b9a5d 1887 hci_inquiry_cache_flush(hdev);
1da177e4 1888 hci_conn_hash_flush(hdev);
09fd0de5 1889 hci_dev_unlock(hdev);
1da177e4
LT
1890
1891 if (hdev->flush)
1892 hdev->flush(hdev);
1893
8e87d142 1894 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 1895 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 1896
4ebeee2d 1897 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1da177e4 1898
b504430c 1899 hci_req_sync_unlock(hdev);
1da177e4
LT
1900 return ret;
1901}
1902
5c912495
MH
1903int hci_dev_reset(__u16 dev)
1904{
1905 struct hci_dev *hdev;
1906 int err;
1907
1908 hdev = hci_dev_get(dev);
1909 if (!hdev)
1910 return -ENODEV;
1911
1912 if (!test_bit(HCI_UP, &hdev->flags)) {
1913 err = -ENETDOWN;
1914 goto done;
1915 }
1916
d7a5a11d 1917 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5c912495
MH
1918 err = -EBUSY;
1919 goto done;
1920 }
1921
d7a5a11d 1922 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5c912495
MH
1923 err = -EOPNOTSUPP;
1924 goto done;
1925 }
1926
1927 err = hci_dev_do_reset(hdev);
1928
1929done:
1930 hci_dev_put(hdev);
1931 return err;
1932}
1933
1da177e4
LT
1934int hci_dev_reset_stat(__u16 dev)
1935{
1936 struct hci_dev *hdev;
1937 int ret = 0;
1938
70f23020
AE
1939 hdev = hci_dev_get(dev);
1940 if (!hdev)
1da177e4
LT
1941 return -ENODEV;
1942
d7a5a11d 1943 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1944 ret = -EBUSY;
1945 goto done;
1946 }
1947
d7a5a11d 1948 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1949 ret = -EOPNOTSUPP;
1950 goto done;
1951 }
1952
1da177e4
LT
1953 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1954
0736cfa8 1955done:
1da177e4 1956 hci_dev_put(hdev);
1da177e4
LT
1957 return ret;
1958}
1959
/* Synchronize the HCI_CONNECTABLE/HCI_DISCOVERABLE flags with a scan
 * mode that was changed outside of mgmt (e.g. via the HCISETSCAN
 * ioctl), and emit a New Settings mgmt event if anything changed.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	/* Page scan enabled <=> device is connectable */
	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	/* Inquiry scan enabled <=> device is discoverable */
	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		/* Limited discoverable cannot survive inquiry scan off */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}
1995
/* Handle the classic HCI device ioctls (HCISETAUTH, HCISETSCAN, ...).
 *
 * @cmd: ioctl number
 * @arg: userspace pointer to a struct hci_dev_req
 *
 * Returns 0 on success or a negative errno. All of these commands are
 * BR/EDR specific and are rejected for non-primary or BR/EDR-disabled
 * controllers, for devices claimed by a user channel, and for devices
 * that are still unconfigured.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A user-channel owner has exclusive control of the device */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		/* Avoid a spurious mgmt event when nothing changes */
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values: MTU and packet count */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2101
2102int hci_get_dev_list(void __user *arg)
2103{
8035ded4 2104 struct hci_dev *hdev;
1da177e4
LT
2105 struct hci_dev_list_req *dl;
2106 struct hci_dev_req *dr;
1da177e4
LT
2107 int n = 0, size, err;
2108 __u16 dev_num;
2109
2110 if (get_user(dev_num, (__u16 __user *) arg))
2111 return -EFAULT;
2112
2113 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2114 return -EINVAL;
2115
2116 size = sizeof(*dl) + dev_num * sizeof(*dr);
2117
70f23020
AE
2118 dl = kzalloc(size, GFP_KERNEL);
2119 if (!dl)
1da177e4
LT
2120 return -ENOMEM;
2121
2122 dr = dl->dev_req;
2123
f20d09d5 2124 read_lock(&hci_dev_list_lock);
8035ded4 2125 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 2126 unsigned long flags = hdev->flags;
c542a06c 2127
2e84d8db
MH
2128 /* When the auto-off is configured it means the transport
2129 * is running, but in that case still indicate that the
2130 * device is actually down.
2131 */
d7a5a11d 2132 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db 2133 flags &= ~BIT(HCI_UP);
c542a06c 2134
1da177e4 2135 (dr + n)->dev_id = hdev->id;
2e84d8db 2136 (dr + n)->dev_opt = flags;
c542a06c 2137
1da177e4
LT
2138 if (++n >= dev_num)
2139 break;
2140 }
f20d09d5 2141 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2142
2143 dl->dev_num = n;
2144 size = sizeof(*dl) + n * sizeof(*dr);
2145
2146 err = copy_to_user(arg, dl, size);
2147 kfree(dl);
2148
2149 return err ? -EFAULT : 0;
2150}
2151
2152int hci_get_dev_info(void __user *arg)
2153{
2154 struct hci_dev *hdev;
2155 struct hci_dev_info di;
2e84d8db 2156 unsigned long flags;
1da177e4
LT
2157 int err = 0;
2158
2159 if (copy_from_user(&di, arg, sizeof(di)))
2160 return -EFAULT;
2161
70f23020
AE
2162 hdev = hci_dev_get(di.dev_id);
2163 if (!hdev)
1da177e4
LT
2164 return -ENODEV;
2165
2e84d8db
MH
2166 /* When the auto-off is configured it means the transport
2167 * is running, but in that case still indicate that the
2168 * device is actually down.
2169 */
d7a5a11d 2170 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db
MH
2171 flags = hdev->flags & ~BIT(HCI_UP);
2172 else
2173 flags = hdev->flags;
c542a06c 2174
1da177e4
LT
2175 strcpy(di.name, hdev->name);
2176 di.bdaddr = hdev->bdaddr;
60f2a3ed 2177 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 2178 di.flags = flags;
1da177e4 2179 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2180 if (lmp_bredr_capable(hdev)) {
2181 di.acl_mtu = hdev->acl_mtu;
2182 di.acl_pkts = hdev->acl_pkts;
2183 di.sco_mtu = hdev->sco_mtu;
2184 di.sco_pkts = hdev->sco_pkts;
2185 } else {
2186 di.acl_mtu = hdev->le_mtu;
2187 di.acl_pkts = hdev->le_pkts;
2188 di.sco_mtu = 0;
2189 di.sco_pkts = 0;
2190 }
1da177e4
LT
2191 di.link_policy = hdev->link_policy;
2192 di.link_mode = hdev->link_mode;
2193
2194 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2195 memcpy(&di.features, &hdev->features, sizeof(di.features));
2196
2197 if (copy_to_user(arg, &di, sizeof(di)))
2198 err = -EFAULT;
2199
2200 hci_dev_put(hdev);
2201
2202 return err;
2203}
2204
2205/* ---- Interface to HCI drivers ---- */
2206
611b30f7
MH
2207static int hci_rfkill_set_block(void *data, bool blocked)
2208{
2209 struct hci_dev *hdev = data;
2210
2211 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2212
d7a5a11d 2213 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
0736cfa8
MH
2214 return -EBUSY;
2215
5e130367 2216 if (blocked) {
a1536da2 2217 hci_dev_set_flag(hdev, HCI_RFKILLED);
d7a5a11d
MH
2218 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2219 !hci_dev_test_flag(hdev, HCI_CONFIG))
bf543036 2220 hci_dev_do_close(hdev);
5e130367 2221 } else {
a358dc11 2222 hci_dev_clear_flag(hdev, HCI_RFKILLED);
1025c04c 2223 }
611b30f7
MH
2224
2225 return 0;
2226}
2227
/* rfkill integration: only the block/unblock hook is needed */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2231
/* Deferred power-on work (hdev->power_on).
 *
 * Either completes a mgmt-initiated power-on of an already-up,
 * auto-off device, or opens the device and then re-checks the error
 * conditions that are deliberately ignored during setup (rfkill,
 * unconfigured, missing address), closing the device again if any
 * still holds. Finally emits the appropriate Index Added mgmt event
 * when leaving the SETUP or CONFIG phase.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	/* Powered on via mgmt while the auto-off timer was pending:
	 * cancel the timer and run the mgmt power-on sequence instead.
	 */
	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be send.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2303
/* Deferred power-off work (hdev->power_off), scheduled e.g. by the
 * auto-off timeout in hci_power_on(); simply closes the device.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2313
/* Work handler for a controller hardware error: give the driver's
 * hw_error hook a chance to handle it (or just log the error code),
 * then recover by closing and reopening the device. If the close
 * itself fails, the reopen is skipped.
 */
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}
2330
35f7498a 2331void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2332{
4821002c 2333 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2334
4821002c
JH
2335 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2336 list_del(&uuid->list);
2aeb9a1a
JH
2337 kfree(uuid);
2338 }
2aeb9a1a
JH
2339}
2340
35f7498a 2341void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1 2342{
0378b597 2343 struct link_key *key;
55ed8ca1 2344
d7d41682 2345 list_for_each_entry(key, &hdev->link_keys, list) {
0378b597
JH
2346 list_del_rcu(&key->list);
2347 kfree_rcu(key, rcu);
55ed8ca1 2348 }
55ed8ca1
JH
2349}
2350
35f7498a 2351void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 2352{
970d0f1b 2353 struct smp_ltk *k;
b899efaf 2354
d7d41682 2355 list_for_each_entry(k, &hdev->long_term_keys, list) {
970d0f1b
JH
2356 list_del_rcu(&k->list);
2357 kfree_rcu(k, rcu);
b899efaf 2358 }
b899efaf
VCG
2359}
2360
970c4e46
JH
2361void hci_smp_irks_clear(struct hci_dev *hdev)
2362{
adae20cb 2363 struct smp_irk *k;
970c4e46 2364
d7d41682 2365 list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
adae20cb
JH
2366 list_del_rcu(&k->list);
2367 kfree_rcu(k, rcu);
970c4e46
JH
2368 }
2369}
2370
600a8749
AM
2371void hci_blocked_keys_clear(struct hci_dev *hdev)
2372{
2373 struct blocked_key *b;
2374
d7d41682 2375 list_for_each_entry(b, &hdev->blocked_keys, list) {
600a8749
AM
2376 list_del_rcu(&b->list);
2377 kfree_rcu(b, rcu);
2378 }
2379}
2380
2381bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2382{
2383 bool blocked = false;
2384 struct blocked_key *b;
2385
2386 rcu_read_lock();
0c2ac7d4 2387 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
600a8749
AM
2388 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2389 blocked = true;
2390 break;
2391 }
2392 }
2393
2394 rcu_read_unlock();
2395 return blocked;
2396}
2397
55ed8ca1
JH
2398struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2399{
8035ded4 2400 struct link_key *k;
55ed8ca1 2401
0378b597
JH
2402 rcu_read_lock();
2403 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2404 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2405 rcu_read_unlock();
600a8749
AM
2406
2407 if (hci_is_blocked_key(hdev,
2408 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2409 k->val)) {
2410 bt_dev_warn_ratelimited(hdev,
2411 "Link key blocked for %pMR",
2412 &k->bdaddr);
2413 return NULL;
2414 }
2415
55ed8ca1 2416 return k;
0378b597
JH
2417 }
2418 }
2419 rcu_read_unlock();
55ed8ca1
JH
2420
2421 return NULL;
2422}
2423
745c0ce3 2424static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2425 u8 key_type, u8 old_key_type)
d25e28ab
JH
2426{
2427 /* Legacy key */
2428 if (key_type < 0x03)
745c0ce3 2429 return true;
d25e28ab
JH
2430
2431 /* Debug keys are insecure so don't store them persistently */
2432 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2433 return false;
d25e28ab
JH
2434
2435 /* Changed combination key and there's no previous one */
2436 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2437 return false;
d25e28ab
JH
2438
2439 /* Security mode 3 case */
2440 if (!conn)
745c0ce3 2441 return true;
d25e28ab 2442
e3befab9
JH
2443 /* BR/EDR key derived using SC from an LE link */
2444 if (conn->type == LE_LINK)
2445 return true;
2446
d25e28ab
JH
2447 /* Neither local nor remote side had no-bonding as requirement */
2448 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2449 return true;
d25e28ab
JH
2450
2451 /* Local side had dedicated bonding as requirement */
2452 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2453 return true;
d25e28ab
JH
2454
2455 /* Remote side had dedicated bonding as requirement */
2456 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2457 return true;
d25e28ab
JH
2458
2459 /* If none of the above criteria match, then don't store the key
2460 * persistently */
745c0ce3 2461 return false;
d25e28ab
JH
2462}
2463
e804d25d 2464static u8 ltk_role(u8 type)
98a0b845 2465{
e804d25d
JH
2466 if (type == SMP_LTK)
2467 return HCI_ROLE_MASTER;
98a0b845 2468
e804d25d 2469 return HCI_ROLE_SLAVE;
98a0b845
JH
2470}
2471
f3a73d97
JH
2472struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2473 u8 addr_type, u8 role)
75d262c2 2474{
c9839a11 2475 struct smp_ltk *k;
75d262c2 2476
970d0f1b
JH
2477 rcu_read_lock();
2478 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
5378bc56
JH
2479 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2480 continue;
2481
923e2414 2482 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
970d0f1b 2483 rcu_read_unlock();
600a8749
AM
2484
2485 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2486 k->val)) {
2487 bt_dev_warn_ratelimited(hdev,
2488 "LTK blocked for %pMR",
2489 &k->bdaddr);
2490 return NULL;
2491 }
2492
75d262c2 2493 return k;
970d0f1b
JH
2494 }
2495 }
2496 rcu_read_unlock();
75d262c2
VCG
2497
2498 return NULL;
2499}
75d262c2 2500
970c4e46
JH
2501struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2502{
600a8749 2503 struct smp_irk *irk_to_return = NULL;
970c4e46
JH
2504 struct smp_irk *irk;
2505
adae20cb
JH
2506 rcu_read_lock();
2507 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2508 if (!bacmp(&irk->rpa, rpa)) {
600a8749
AM
2509 irk_to_return = irk;
2510 goto done;
adae20cb 2511 }
970c4e46
JH
2512 }
2513
adae20cb 2514 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 2515 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 2516 bacpy(&irk->rpa, rpa);
600a8749
AM
2517 irk_to_return = irk;
2518 goto done;
970c4e46
JH
2519 }
2520 }
600a8749
AM
2521
2522done:
2523 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2524 irk_to_return->val)) {
2525 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2526 &irk_to_return->bdaddr);
2527 irk_to_return = NULL;
2528 }
2529
adae20cb 2530 rcu_read_unlock();
970c4e46 2531
600a8749 2532 return irk_to_return;
970c4e46
JH
2533}
2534
2535struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2536 u8 addr_type)
2537{
600a8749 2538 struct smp_irk *irk_to_return = NULL;
970c4e46
JH
2539 struct smp_irk *irk;
2540
6cfc9988
JH
2541 /* Identity Address must be public or static random */
2542 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2543 return NULL;
2544
adae20cb
JH
2545 rcu_read_lock();
2546 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 2547 if (addr_type == irk->addr_type &&
adae20cb 2548 bacmp(bdaddr, &irk->bdaddr) == 0) {
600a8749
AM
2549 irk_to_return = irk;
2550 goto done;
adae20cb 2551 }
970c4e46 2552 }
600a8749
AM
2553
2554done:
2555
2556 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2557 irk_to_return->val)) {
2558 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2559 &irk_to_return->bdaddr);
2560 irk_to_return = NULL;
2561 }
2562
adae20cb 2563 rcu_read_unlock();
970c4e46 2564
600a8749 2565 return irk_to_return;
970c4e46
JH
2566}
2567
/* Store (or update) a BR/EDR link key for a remote device.
 *
 * @conn: the connection the key was created on, or NULL (security
 *        mode 3 style key distribution)
 * @persistent: optional out-parameter telling the caller whether the
 *        key should be stored persistently (see hci_persistent_key())
 *
 * Returns the stored key, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the logic below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed-combination event keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2614
ca9142b8 2615struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 2616 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 2617 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 2618{
c9839a11 2619 struct smp_ltk *key, *old_key;
e804d25d 2620 u8 role = ltk_role(type);
75d262c2 2621
f3a73d97 2622 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
c9839a11 2623 if (old_key)
75d262c2 2624 key = old_key;
c9839a11 2625 else {
0a14ab41 2626 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2627 if (!key)
ca9142b8 2628 return NULL;
970d0f1b 2629 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2630 }
2631
75d262c2 2632 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2633 key->bdaddr_type = addr_type;
2634 memcpy(key->val, tk, sizeof(key->val));
2635 key->authenticated = authenticated;
2636 key->ediv = ediv;
fe39c7b2 2637 key->rand = rand;
c9839a11
VCG
2638 key->enc_size = enc_size;
2639 key->type = type;
75d262c2 2640
ca9142b8 2641 return key;
75d262c2
VCG
2642}
2643
ca9142b8
JH
2644struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2645 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
2646{
2647 struct smp_irk *irk;
2648
2649 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2650 if (!irk) {
2651 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2652 if (!irk)
ca9142b8 2653 return NULL;
970c4e46
JH
2654
2655 bacpy(&irk->bdaddr, bdaddr);
2656 irk->addr_type = addr_type;
2657
adae20cb 2658 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
2659 }
2660
2661 memcpy(irk->val, val, 16);
2662 bacpy(&irk->rpa, rpa);
2663
ca9142b8 2664 return irk;
970c4e46
JH
2665}
2666
55ed8ca1
JH
2667int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2668{
2669 struct link_key *key;
2670
2671 key = hci_find_link_key(hdev, bdaddr);
2672 if (!key)
2673 return -ENOENT;
2674
6ed93dc6 2675 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 2676
0378b597
JH
2677 list_del_rcu(&key->list);
2678 kfree_rcu(key, rcu);
55ed8ca1
JH
2679
2680 return 0;
2681}
2682
e0b2b27e 2683int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 2684{
970d0f1b 2685 struct smp_ltk *k;
c51ffa0b 2686 int removed = 0;
b899efaf 2687
970d0f1b 2688 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 2689 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2690 continue;
2691
6ed93dc6 2692 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 2693
970d0f1b
JH
2694 list_del_rcu(&k->list);
2695 kfree_rcu(k, rcu);
c51ffa0b 2696 removed++;
b899efaf
VCG
2697 }
2698
c51ffa0b 2699 return removed ? 0 : -ENOENT;
b899efaf
VCG
2700}
2701
a7ec7338
JH
2702void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2703{
adae20cb 2704 struct smp_irk *k;
a7ec7338 2705
adae20cb 2706 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
2707 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2708 continue;
2709
2710 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2711
adae20cb
JH
2712 list_del_rcu(&k->list);
2713 kfree_rcu(k, rcu);
a7ec7338
JH
2714 }
2715}
2716
55e76b38
JH
2717bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2718{
2719 struct smp_ltk *k;
4ba9faf3 2720 struct smp_irk *irk;
55e76b38
JH
2721 u8 addr_type;
2722
2723 if (type == BDADDR_BREDR) {
2724 if (hci_find_link_key(hdev, bdaddr))
2725 return true;
2726 return false;
2727 }
2728
2729 /* Convert to HCI addr type which struct smp_ltk uses */
2730 if (type == BDADDR_LE_PUBLIC)
2731 addr_type = ADDR_LE_DEV_PUBLIC;
2732 else
2733 addr_type = ADDR_LE_DEV_RANDOM;
2734
4ba9faf3
JH
2735 irk = hci_get_irk(hdev, bdaddr, addr_type);
2736 if (irk) {
2737 bdaddr = &irk->bdaddr;
2738 addr_type = irk->addr_type;
2739 }
2740
55e76b38
JH
2741 rcu_read_lock();
2742 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
87c8b28d
JH
2743 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2744 rcu_read_unlock();
55e76b38 2745 return true;
87c8b28d 2746 }
55e76b38
JH
2747 }
2748 rcu_read_unlock();
2749
2750 return false;
2751}
2752
6bd32326 2753/* HCI command timer function */
65cc2b49 2754static void hci_cmd_timeout(struct work_struct *work)
6bd32326 2755{
65cc2b49
MH
2756 struct hci_dev *hdev = container_of(work, struct hci_dev,
2757 cmd_timer.work);
6bd32326 2758
bda4f23a
AE
2759 if (hdev->sent_cmd) {
2760 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2761 u16 opcode = __le16_to_cpu(sent->opcode);
2762
2064ee33 2763 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
bda4f23a 2764 } else {
2064ee33 2765 bt_dev_err(hdev, "command tx timeout");
bda4f23a
AE
2766 }
2767
e2bef384
RJ
2768 if (hdev->cmd_timeout)
2769 hdev->cmd_timeout(hdev);
2770
6bd32326 2771 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2772 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2773}
2774
2763eda6 2775struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 2776 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
2777{
2778 struct oob_data *data;
2779
6928a924
JH
2780 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2781 if (bacmp(bdaddr, &data->bdaddr) != 0)
2782 continue;
2783 if (data->bdaddr_type != bdaddr_type)
2784 continue;
2785 return data;
2786 }
2763eda6
SJ
2787
2788 return NULL;
2789}
2790
6928a924
JH
2791int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2792 u8 bdaddr_type)
2763eda6
SJ
2793{
2794 struct oob_data *data;
2795
6928a924 2796 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6
SJ
2797 if (!data)
2798 return -ENOENT;
2799
6928a924 2800 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2763eda6
SJ
2801
2802 list_del(&data->list);
2803 kfree(data);
2804
2805 return 0;
2806}
2807
35f7498a 2808void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2809{
2810 struct oob_data *data, *n;
2811
2812 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2813 list_del(&data->list);
2814 kfree(data);
2815 }
2763eda6
SJ
2816}
2817
/* Store (or update) remote out-of-band pairing data.
 *
 * hash192/rand192 are the P-192 values, hash256/rand256 the P-256
 * (Secure Connections) values; either pair may be NULL. The present
 * field is a bitmask: bit 0 = P-192 data valid, bit 1 = P-256 data
 * valid. Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;	/* both P-192 and P-256 */
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;	/* P-256 only */
		else
			data->present = 0x00;	/* nothing valid */
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;	/* P-192 only */
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2863
d2609b34
FG
2864/* This function requires the caller holds hdev->lock */
2865struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2866{
2867 struct adv_info *adv_instance;
2868
2869 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2870 if (adv_instance->instance == instance)
2871 return adv_instance;
2872 }
2873
2874 return NULL;
2875}
2876
2877/* This function requires the caller holds hdev->lock */
74b93e9f
PK
2878struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2879{
d2609b34
FG
2880 struct adv_info *cur_instance;
2881
2882 cur_instance = hci_find_adv_instance(hdev, instance);
2883 if (!cur_instance)
2884 return NULL;
2885
2886 if (cur_instance == list_last_entry(&hdev->adv_instances,
2887 struct adv_info, list))
2888 return list_first_entry(&hdev->adv_instances,
2889 struct adv_info, list);
2890 else
2891 return list_next_entry(cur_instance, list);
2892}
2893
2894/* This function requires the caller holds hdev->lock */
2895int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2896{
2897 struct adv_info *adv_instance;
2898
2899 adv_instance = hci_find_adv_instance(hdev, instance);
2900 if (!adv_instance)
2901 return -ENOENT;
2902
2903 BT_DBG("%s removing %dMR", hdev->name, instance);
2904
cab054ab
JH
2905 if (hdev->cur_adv_instance == instance) {
2906 if (hdev->adv_instance_timeout) {
2907 cancel_delayed_work(&hdev->adv_instance_expire);
2908 hdev->adv_instance_timeout = 0;
2909 }
2910 hdev->cur_adv_instance = 0x00;
5d900e46
FG
2911 }
2912
a73c046a
JK
2913 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2914
d2609b34
FG
2915 list_del(&adv_instance->list);
2916 kfree(adv_instance);
2917
2918 hdev->adv_instance_cnt--;
2919
2920 return 0;
2921}
2922
/* Mark the RPA of every advertising instance as expired (or valid).
 * NOTE(review): presumably requires hdev->lock like the neighbouring
 * adv-instance helpers — confirm at the call sites.
 */
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}
2930
d2609b34
FG
2931/* This function requires the caller holds hdev->lock */
2932void hci_adv_instances_clear(struct hci_dev *hdev)
2933{
2934 struct adv_info *adv_instance, *n;
2935
5d900e46
FG
2936 if (hdev->adv_instance_timeout) {
2937 cancel_delayed_work(&hdev->adv_instance_expire);
2938 hdev->adv_instance_timeout = 0;
2939 }
2940
d2609b34 2941 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
a73c046a 2942 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
d2609b34
FG
2943 list_del(&adv_instance->list);
2944 kfree(adv_instance);
2945 }
2946
2947 hdev->adv_instance_cnt = 0;
cab054ab 2948 hdev->cur_adv_instance = 0x00;
d2609b34
FG
2949}
2950
/* Delayed work: flag an advertising instance so that a fresh
 * resolvable private address is generated before its next use.
 */
static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}
2960
d2609b34
FG
/* Add a new advertising instance or update an existing one.
 *
 * If @instance already exists its data buffers are reset and refilled;
 * otherwise a new adv_info is allocated (bounded by le_num_of_adv_sets).
 * Returns 0 on success, -EOVERFLOW if no instance slot is available or
 * @instance is out of range, -ENOMEM on allocation failure.
 *
 * This function requires the caller holds hdev->lock.
 */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration, s8 tx_power,
			 u32 min_interval, u32 max_interval)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		/* Existing instance: wipe old payloads before refilling */
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		/* Valid instance numbers are 1..le_num_of_adv_sets */
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > hdev->le_num_of_adv_sets)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		/* Marked pending until programmed into the controller */
		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;
	adv_instance->min_interval = min_interval;
	adv_instance->max_interval = max_interval;
	adv_instance->tx_power = tx_power;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	/* duration == 0 selects the default rotation duration */
	if (duration == 0)
		adv_instance->duration = hdev->def_multi_adv_rotation_duration;
	else
		adv_instance->duration = duration;

	INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
			  adv_instance_rpa_expired);

	BT_DBG("%s for %dMR", hdev->name, instance);

	return 0;
}
3020
31aab5c2
DW
3021/* This function requires the caller holds hdev->lock */
3022int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3023 u16 adv_data_len, u8 *adv_data,
3024 u16 scan_rsp_len, u8 *scan_rsp_data)
3025{
3026 struct adv_info *adv_instance;
3027
3028 adv_instance = hci_find_adv_instance(hdev, instance);
3029
3030 /* If advertisement doesn't exist, we can't modify its data */
3031 if (!adv_instance)
3032 return -ENOENT;
3033
3034 if (adv_data_len) {
3035 memset(adv_instance->adv_data, 0,
3036 sizeof(adv_instance->adv_data));
3037 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3038 adv_instance->adv_data_len = adv_data_len;
3039 }
3040
3041 if (scan_rsp_len) {
3042 memset(adv_instance->scan_rsp_data, 0,
3043 sizeof(adv_instance->scan_rsp_data));
3044 memcpy(adv_instance->scan_rsp_data,
3045 scan_rsp_data, scan_rsp_len);
3046 adv_instance->scan_rsp_len = scan_rsp_len;
3047 }
3048
3049 return 0;
3050}
3051
e5e1e7fd
MC
/* Free every registered advertisement monitor and destroy the IDR.
 * This function requires the caller holds hdev->lock.
 */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int handle;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		hci_free_adv_monitor(hdev, monitor);

	/* All handles have been removed above; release the IDR storage */
	idr_destroy(&hdev->adv_monitors_idr);
}
3063
66bd095a
AP
/* Frees the monitor structure and do some bookkeepings.
 * Releases all patterns, drops the monitor's IDR handle, and — if the
 * monitor had been registered — decrements the monitor count and
 * notifies mgmt of the removal.
 * This function requires the caller holds hdev->lock.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	struct adv_pattern *pattern;
	struct adv_pattern *tmp;

	if (!monitor)
		return;

	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);
		kfree(pattern);
	}

	/* handle 0 means no IDR slot was ever assigned */
	if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);

	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
		hdev->adv_monitors_cnt--;
		mgmt_adv_monitor_removed(hdev, monitor->handle);
	}

	kfree(monitor);
}
3090
a2a4dedf
AP
/* Forward an "add adv patterns monitor" completion status to mgmt. */
int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
{
	return mgmt_add_adv_patterns_monitor_complete(hdev, status);
}
3095
66bd095a
AP
/* Forward a "remove adv monitor" completion status to mgmt. */
int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
{
	return mgmt_remove_adv_monitor_complete(hdev, status);
}
3100
a2a4dedf
AP
/* Assigns handle to a monitor, and if offloading is supported and power is on,
 * also attempts to forward the request to the controller.
 * Returns true if request is forwarded (result is pending), false otherwise.
 * On a false return, *err distinguishes success-without-offload (0) from
 * failure (negative errno).
 * This function requires the caller holds hdev->lock.
 */
bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
			 int *err)
{
	int min, max, handle;

	*err = 0;

	if (!monitor) {
		*err = -EINVAL;
		return false;
	}

	/* idr_alloc's end bound is exclusive, so handles fall in
	 * [HCI_MIN_ADV_MONITOR_HANDLE, min + HCI_MAX_ADV_MONITOR_NUM_HANDLES)
	 */
	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);
	if (handle < 0) {
		*err = handle;
		return false;
	}

	monitor->handle = handle;

	/* Offloading to the controller only makes sense when powered */
	if (!hdev_is_powered(hdev))
		return false;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		hci_update_background_scan(hdev);
		bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
		/* Message was not forwarded to controller - not an error */
		return false;
	case HCI_ADV_MONITOR_EXT_MSFT:
		*err = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
			   *err);
		break;
	}

	return (*err == 0);
}
3147
66bd095a
AP
/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * Returns true if request is forwarded (result is pending), false otherwise.
 * -ENOENT from the offload backend is treated as "handle unknown to the
 * controller" and downgraded to a local free plus warning (*err reset to 0).
 * This function requires the caller holds hdev->lock.
 */
static bool hci_remove_adv_monitor(struct hci_dev *hdev,
				   struct adv_monitor *monitor,
				   u16 handle, int *err)
{
	*err = 0;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		goto free_monitor;
	case HCI_ADV_MONITOR_EXT_MSFT:
		*err = msft_remove_monitor(hdev, monitor, handle);
		break;
	}

	/* In case no matching handle registered, just free the monitor */
	if (*err == -ENOENT)
		goto free_monitor;

	return (*err == 0);

free_monitor:
	if (*err == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	*err = 0;
	return false;
}
3182
66bd095a
AP
/* Remove the monitor registered under @handle.
 * Returns true if request is forwarded (result is pending), false otherwise.
 * *err is -EINVAL when no monitor exists for @handle.
 * This function requires the caller holds hdev->lock.
 */
bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
{
	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
	bool pending;

	if (!monitor) {
		*err = -EINVAL;
		return false;
	}

	pending = hci_remove_adv_monitor(hdev, monitor, handle, err);

	/* Monitor removed synchronously: refresh passive scanning now */
	if (!*err && !pending)
		hci_update_background_scan(hdev);

	bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
		   hdev->name, handle, *err, pending ? "" : "not ");

	return pending;
}
3205
/* Remove every registered advertisement monitor.
 * Returns true if request is forwarded (result is pending), false otherwise.
 * The loop stops at the first error or at the first removal that becomes
 * pending (the remainder is expected to be handled on completion).
 * This function requires the caller holds hdev->lock.
 */
bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
{
	struct adv_monitor *monitor;
	int idr_next_id = 0;
	bool pending = false;
	bool update = false;

	*err = 0;

	while (!*err && !pending) {
		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
		if (!monitor)
			break;

		/* handle 0: let the backend remove by monitor, not handle */
		pending = hci_remove_adv_monitor(hdev, monitor, 0, err);

		if (!*err && !pending)
			update = true;
	}

	/* At least one monitor was removed synchronously */
	if (update)
		hci_update_background_scan(hdev);

	bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
		   hdev->name, *err, pending ? "" : "not ");

	return pending;
}
3237
8208f5a9
MC
3238/* This function requires the caller holds hdev->lock */
3239bool hci_is_adv_monitoring(struct hci_dev *hdev)
3240{
3241 return !idr_is_empty(&hdev->adv_monitors_idr);
3242}
3243
a2a4dedf
AP
3244int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3245{
3246 if (msft_monitor_supported(hdev))
3247 return HCI_ADV_MONITOR_EXT_MSFT;
3248
3249 return HCI_ADV_MONITOR_EXT_NONE;
3250}
3251
dcc36c16 3252struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 3253 bdaddr_t *bdaddr, u8 type)
b2a66aad 3254{
8035ded4 3255 struct bdaddr_list *b;
b2a66aad 3256
dcc36c16 3257 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 3258 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3259 return b;
b9ee0a78 3260 }
b2a66aad
AJ
3261
3262 return NULL;
3263}
3264
b950aa88
AN
3265struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3266 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3267 u8 type)
3268{
3269 struct bdaddr_list_with_irk *b;
3270
3271 list_for_each_entry(b, bdaddr_list, list) {
3272 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3273 return b;
3274 }
3275
3276 return NULL;
3277}
3278
8baaa403
APS
3279struct bdaddr_list_with_flags *
3280hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3281 bdaddr_t *bdaddr, u8 type)
3282{
3283 struct bdaddr_list_with_flags *b;
3284
3285 list_for_each_entry(b, bdaddr_list, list) {
3286 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3287 return b;
3288 }
3289
3290 return NULL;
3291}
3292
dcc36c16 3293void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad 3294{
7eb7404f 3295 struct bdaddr_list *b, *n;
b2a66aad 3296
7eb7404f
GT
3297 list_for_each_entry_safe(b, n, bdaddr_list, list) {
3298 list_del(&b->list);
b2a66aad
AJ
3299 kfree(b);
3300 }
b2a66aad
AJ
3301}
3302
dcc36c16 3303int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3304{
3305 struct bdaddr_list *entry;
b2a66aad 3306
b9ee0a78 3307 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3308 return -EBADF;
3309
dcc36c16 3310 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 3311 return -EEXIST;
b2a66aad 3312
27f70f3e 3313 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
3314 if (!entry)
3315 return -ENOMEM;
b2a66aad
AJ
3316
3317 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3318 entry->bdaddr_type = type;
b2a66aad 3319
dcc36c16 3320 list_add(&entry->list, list);
b2a66aad 3321
2a8357f2 3322 return 0;
b2a66aad
AJ
3323}
3324
b950aa88
AN
3325int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3326 u8 type, u8 *peer_irk, u8 *local_irk)
3327{
3328 struct bdaddr_list_with_irk *entry;
3329
3330 if (!bacmp(bdaddr, BDADDR_ANY))
3331 return -EBADF;
3332
3333 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3334 return -EEXIST;
3335
3336 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3337 if (!entry)
3338 return -ENOMEM;
3339
3340 bacpy(&entry->bdaddr, bdaddr);
3341 entry->bdaddr_type = type;
3342
3343 if (peer_irk)
3344 memcpy(entry->peer_irk, peer_irk, 16);
3345
3346 if (local_irk)
3347 memcpy(entry->local_irk, local_irk, 16);
3348
3349 list_add(&entry->list, list);
3350
3351 return 0;
3352}
3353
8baaa403
APS
3354int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3355 u8 type, u32 flags)
3356{
3357 struct bdaddr_list_with_flags *entry;
3358
3359 if (!bacmp(bdaddr, BDADDR_ANY))
3360 return -EBADF;
3361
3362 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3363 return -EEXIST;
3364
3365 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3366 if (!entry)
3367 return -ENOMEM;
3368
3369 bacpy(&entry->bdaddr, bdaddr);
3370 entry->bdaddr_type = type;
3371 entry->current_flags = flags;
3372
3373 list_add(&entry->list, list);
3374
3375 return 0;
3376}
3377
dcc36c16 3378int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3379{
3380 struct bdaddr_list *entry;
b2a66aad 3381
35f7498a 3382 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 3383 hci_bdaddr_list_clear(list);
35f7498a
JH
3384 return 0;
3385 }
b2a66aad 3386
dcc36c16 3387 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
3388 if (!entry)
3389 return -ENOENT;
3390
3391 list_del(&entry->list);
3392 kfree(entry);
3393
3394 return 0;
3395}
3396
b950aa88
AN
3397int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3398 u8 type)
3399{
3400 struct bdaddr_list_with_irk *entry;
3401
3402 if (!bacmp(bdaddr, BDADDR_ANY)) {
3403 hci_bdaddr_list_clear(list);
3404 return 0;
3405 }
3406
3407 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3408 if (!entry)
3409 return -ENOENT;
3410
3411 list_del(&entry->list);
3412 kfree(entry);
3413
3414 return 0;
3415}
3416
8baaa403
APS
3417int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3418 u8 type)
3419{
3420 struct bdaddr_list_with_flags *entry;
3421
3422 if (!bacmp(bdaddr, BDADDR_ANY)) {
3423 hci_bdaddr_list_clear(list);
3424 return 0;
3425 }
3426
3427 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3428 if (!entry)
3429 return -ENOENT;
3430
3431 list_del(&entry->list);
3432 kfree(entry);
3433
3434 return 0;
3435}
3436
15819a70
AG
3437/* This function requires the caller holds hdev->lock */
3438struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3439 bdaddr_t *addr, u8 addr_type)
3440{
3441 struct hci_conn_params *params;
3442
3443 list_for_each_entry(params, &hdev->le_conn_params, list) {
3444 if (bacmp(&params->addr, addr) == 0 &&
3445 params->addr_type == addr_type) {
3446 return params;
3447 }
3448 }
3449
3450 return NULL;
3451}
3452
4b10966f 3453/* This function requires the caller holds hdev->lock */
501f8827
JH
3454struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3455 bdaddr_t *addr, u8 addr_type)
a9b0a04c 3456{
912b42ef 3457 struct hci_conn_params *param;
a9b0a04c 3458
6540351e
MH
3459 switch (addr_type) {
3460 case ADDR_LE_DEV_PUBLIC_RESOLVED:
3461 addr_type = ADDR_LE_DEV_PUBLIC;
3462 break;
3463 case ADDR_LE_DEV_RANDOM_RESOLVED:
3464 addr_type = ADDR_LE_DEV_RANDOM;
3465 break;
3466 }
3467
501f8827 3468 list_for_each_entry(param, list, action) {
912b42ef
JH
3469 if (bacmp(&param->addr, addr) == 0 &&
3470 param->addr_type == addr_type)
3471 return param;
4b10966f
MH
3472 }
3473
3474 return NULL;
a9b0a04c
AG
3475}
3476
15819a70 3477/* This function requires the caller holds hdev->lock */
51d167c0
MH
3478struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3479 bdaddr_t *addr, u8 addr_type)
15819a70
AG
3480{
3481 struct hci_conn_params *params;
3482
3483 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 3484 if (params)
51d167c0 3485 return params;
15819a70
AG
3486
3487 params = kzalloc(sizeof(*params), GFP_KERNEL);
3488 if (!params) {
2064ee33 3489 bt_dev_err(hdev, "out of memory");
51d167c0 3490 return NULL;
15819a70
AG
3491 }
3492
3493 bacpy(&params->addr, addr);
3494 params->addr_type = addr_type;
cef952ce
AG
3495
3496 list_add(&params->list, &hdev->le_conn_params);
93450c75 3497 INIT_LIST_HEAD(&params->action);
cef952ce 3498
bf5b3c8b
MH
3499 params->conn_min_interval = hdev->le_conn_min_interval;
3500 params->conn_max_interval = hdev->le_conn_max_interval;
3501 params->conn_latency = hdev->le_conn_latency;
3502 params->supervision_timeout = hdev->le_supv_timeout;
3503 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3504
3505 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3506
51d167c0 3507 return params;
bf5b3c8b
MH
3508}
3509
f6c63249 3510static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 3511{
f8aaf9b6 3512 if (params->conn) {
f161dd41 3513 hci_conn_drop(params->conn);
f8aaf9b6
JH
3514 hci_conn_put(params->conn);
3515 }
f161dd41 3516
95305baa 3517 list_del(&params->action);
15819a70
AG
3518 list_del(&params->list);
3519 kfree(params);
f6c63249
JH
3520}
3521
3522/* This function requires the caller holds hdev->lock */
3523void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3524{
3525 struct hci_conn_params *params;
3526
3527 params = hci_conn_params_lookup(hdev, addr, addr_type);
3528 if (!params)
3529 return;
3530
3531 hci_conn_params_free(params);
15819a70 3532
95305baa
JH
3533 hci_update_background_scan(hdev);
3534
15819a70
AG
3535 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3536}
3537
/* Remove all connection parameters whose auto-connect mode is DISABLED,
 * except entries with a pending explicit connect, which are downgraded
 * to one-shot (EXPLICIT) instead of being deleted.
 * This function requires the caller holds hdev->lock.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish a one-time connection to a disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
3561
3562/* This function requires the caller holds hdev->lock */
030e7f81 3563static void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 3564{
15819a70 3565 struct hci_conn_params *params, *tmp;
77a77a30 3566
f6c63249
JH
3567 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3568 hci_conn_params_free(params);
77a77a30 3569
15819a70 3570 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
3571}
3572
a1f4c318
JH
3573/* Copy the Identity Address of the controller.
3574 *
3575 * If the controller has a public BD_ADDR, then by default use that one.
3576 * If this is a LE only controller without a public address, default to
3577 * the static random address.
3578 *
3579 * For debugging purposes it is possible to force controllers with a
3580 * public address to use the static random address instead.
50b5b952
MH
3581 *
3582 * In case BR/EDR has been disabled on a dual-mode controller and
3583 * userspace has configured a static address, then that address
3584 * becomes the identity address instead of the public BR/EDR address.
a1f4c318
JH
3585 */
3586void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3587 u8 *bdaddr_type)
3588{
b7cb93e5 3589 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
50b5b952 3590 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
d7a5a11d 3591 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
50b5b952 3592 bacmp(&hdev->static_addr, BDADDR_ANY))) {
a1f4c318
JH
3593 bacpy(bdaddr, &hdev->static_addr);
3594 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3595 } else {
3596 bacpy(bdaddr, &hdev->bdaddr);
3597 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3598 }
3599}
3600
0e995280
APS
3601static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3602{
3603 int i;
3604
3605 for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3606 clear_bit(i, hdev->suspend_tasks);
3607
3608 wake_up(&hdev->suspend_wait_q);
3609}
3610
9952d90e
APS
3611static int hci_suspend_wait_event(struct hci_dev *hdev)
3612{
3613#define WAKE_COND \
3614 (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
3615 __SUSPEND_NUM_TASKS)
3616
3617 int i;
3618 int ret = wait_event_timeout(hdev->suspend_wait_q,
3619 WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3620
3621 if (ret == 0) {
a9ec8423 3622 bt_dev_err(hdev, "Timed out waiting for suspend events");
9952d90e
APS
3623 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3624 if (test_bit(i, hdev->suspend_tasks))
a9ec8423 3625 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
9952d90e
APS
3626 clear_bit(i, hdev->suspend_tasks);
3627 }
3628
3629 ret = -ETIMEDOUT;
3630 } else {
3631 ret = 0;
3632 }
3633
3634 return ret;
3635}
3636
/* Workqueue handler: run the suspend preparation request for the state
 * previously stored in hdev->suspend_state_next, under hdev->lock.
 */
static void hci_prepare_suspend(struct work_struct *work)
{
	struct hci_dev *hdev =
		container_of(work, struct hci_dev, suspend_prepare);

	hci_dev_lock(hdev);
	hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
	hci_dev_unlock(hdev);
}
3646
8731840a
APS
3647static int hci_change_suspend_state(struct hci_dev *hdev,
3648 enum suspended_state next)
3649{
3650 hdev->suspend_state_next = next;
3651 set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3652 queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3653 return hci_suspend_wait_event(hdev);
3654}
3655
2f20216c
APS
3656static void hci_clear_wake_reason(struct hci_dev *hdev)
3657{
3658 hci_dev_lock(hdev);
3659
3660 hdev->wake_reason = 0;
3661 bacpy(&hdev->wake_addr, BDADDR_ANY);
3662 hdev->wake_addr_type = 0;
3663
3664 hci_dev_unlock(hdev);
3665}
3666
9952d90e
APS
3667static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3668 void *data)
3669{
3670 struct hci_dev *hdev =
3671 container_of(nb, struct hci_dev, suspend_notifier);
3672 int ret = 0;
2f20216c 3673 u8 state = BT_RUNNING;
9952d90e
APS
3674
3675 /* If powering down, wait for completion. */
3676 if (mgmt_powering_down(hdev)) {
3677 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3678 ret = hci_suspend_wait_event(hdev);
3679 if (ret)
3680 goto done;
3681 }
3682
3683 /* Suspend notifier should only act on events when powered. */
5ff20cbe
VS
3684 if (!hdev_is_powered(hdev) ||
3685 hci_dev_test_flag(hdev, HCI_UNREGISTER))
9952d90e
APS
3686 goto done;
3687
3688 if (action == PM_SUSPEND_PREPARE) {
4f40afc6
APS
3689 /* Suspend consists of two actions:
3690 * - First, disconnect everything and make the controller not
3691 * connectable (disabling scanning)
3692 * - Second, program event filter/whitelist and enable scan
3693 */
8731840a 3694 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
2f20216c
APS
3695 if (!ret)
3696 state = BT_SUSPEND_DISCONNECT;
4f40afc6 3697
81dafad5
APS
3698 /* Only configure whitelist if disconnect succeeded and wake
3699 * isn't being prevented.
3700 */
2f20216c 3701 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
8731840a 3702 ret = hci_change_suspend_state(hdev,
0d2c9825 3703 BT_SUSPEND_CONFIGURE_WAKE);
2f20216c
APS
3704 if (!ret)
3705 state = BT_SUSPEND_CONFIGURE_WAKE;
3706 }
3707
3708 hci_clear_wake_reason(hdev);
3709 mgmt_suspending(hdev, state);
3710
9952d90e 3711 } else if (action == PM_POST_SUSPEND) {
8731840a 3712 ret = hci_change_suspend_state(hdev, BT_RUNNING);
2f20216c
APS
3713
3714 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3715 hdev->wake_addr_type);
9952d90e
APS
3716 }
3717
3718done:
a9ec8423
APS
3719 /* We always allow suspend even if suspend preparation failed and
3720 * attempt to recover in resume.
3721 */
3722 if (ret)
3723 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3724 action, ret);
3725
24b06572 3726 return NOTIFY_DONE;
9952d90e 3727}
8731840a 3728
9be0dab7
DH
3729/* Alloc HCI device */
3730struct hci_dev *hci_alloc_dev(void)
3731{
3732 struct hci_dev *hdev;
3733
27f70f3e 3734 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
3735 if (!hdev)
3736 return NULL;
3737
b1b813d4
DH
3738 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3739 hdev->esco_type = (ESCO_HV1);
3740 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3741 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3742 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3743 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3744 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3745 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
d2609b34
FG
3746 hdev->adv_instance_cnt = 0;
3747 hdev->cur_adv_instance = 0x00;
5d900e46 3748 hdev->adv_instance_timeout = 0;
b1b813d4 3749
c4f1f408
HC
3750 hdev->advmon_allowlist_duration = 300;
3751 hdev->advmon_no_filter_duration = 500;
80af16a3 3752 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
c4f1f408 3753
b1b813d4
DH
3754 hdev->sniff_max_interval = 800;
3755 hdev->sniff_min_interval = 80;
3756
3f959d46 3757 hdev->le_adv_channel_map = 0x07;
628531c9
GL
3758 hdev->le_adv_min_interval = 0x0800;
3759 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
3760 hdev->le_scan_interval = 0x0060;
3761 hdev->le_scan_window = 0x0030;
10873f99
AM
3762 hdev->le_scan_int_suspend = 0x0400;
3763 hdev->le_scan_window_suspend = 0x0012;
3764 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3765 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3766 hdev->le_scan_int_connect = 0x0060;
3767 hdev->le_scan_window_connect = 0x0060;
b48c3b59
JH
3768 hdev->le_conn_min_interval = 0x0018;
3769 hdev->le_conn_max_interval = 0x0028;
04fb7d90
MH
3770 hdev->le_conn_latency = 0x0000;
3771 hdev->le_supv_timeout = 0x002a;
a8e1bfaa
MH
3772 hdev->le_def_tx_len = 0x001b;
3773 hdev->le_def_tx_time = 0x0148;
3774 hdev->le_max_tx_len = 0x001b;
3775 hdev->le_max_tx_time = 0x0148;
3776 hdev->le_max_rx_len = 0x001b;
3777 hdev->le_max_rx_time = 0x0148;
30d65e08
MK
3778 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3779 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
6decb5b4
JK
3780 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3781 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
1d0fac2c 3782 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
10873f99 3783 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
49b020c1 3784 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
7c395ea5
DW
3785 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3786 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
bef64738 3787
d6bfd59c 3788 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3789 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3790 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3791 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
302975cb 3792 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
58a96fc3 3793 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
d6bfd59c 3794
10873f99
AM
3795 /* default 1.28 sec page scan */
3796 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3797 hdev->def_page_scan_int = 0x0800;
3798 hdev->def_page_scan_window = 0x0012;
3799
b1b813d4
DH
3800 mutex_init(&hdev->lock);
3801 mutex_init(&hdev->req_lock);
3802
3803 INIT_LIST_HEAD(&hdev->mgmt_pending);
3804 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 3805 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
3806 INIT_LIST_HEAD(&hdev->uuids);
3807 INIT_LIST_HEAD(&hdev->link_keys);
3808 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3809 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3810 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3811 INIT_LIST_HEAD(&hdev->le_white_list);
cfdb0c2d 3812 INIT_LIST_HEAD(&hdev->le_resolv_list);
15819a70 3813 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3814 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3815 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3816 INIT_LIST_HEAD(&hdev->conn_hash.list);
d2609b34 3817 INIT_LIST_HEAD(&hdev->adv_instances);
600a8749 3818 INIT_LIST_HEAD(&hdev->blocked_keys);
b1b813d4
DH
3819
3820 INIT_WORK(&hdev->rx_work, hci_rx_work);
3821 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3822 INIT_WORK(&hdev->tx_work, hci_tx_work);
3823 INIT_WORK(&hdev->power_on, hci_power_on);
c7741d16 3824 INIT_WORK(&hdev->error_reset, hci_error_reset);
9952d90e 3825 INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
b1b813d4 3826
b1b813d4 3827 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
b1b813d4 3828
b1b813d4
DH
3829 skb_queue_head_init(&hdev->rx_q);
3830 skb_queue_head_init(&hdev->cmd_q);
3831 skb_queue_head_init(&hdev->raw_q);
3832
3833 init_waitqueue_head(&hdev->req_wait_q);
9952d90e 3834 init_waitqueue_head(&hdev->suspend_wait_q);
b1b813d4 3835
65cc2b49 3836 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3837
5fc16cc4
JH
3838 hci_request_setup(hdev);
3839
b1b813d4
DH
3840 hci_init_sysfs(hdev);
3841 discovery_init(hdev);
9be0dab7
DH
3842
3843 return hdev;
3844}
3845EXPORT_SYMBOL(hci_alloc_dev);
3846
/* Free HCI device.
 * Drops the device reference; the actual kfree happens in the driver
 * model's release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3854
1da177e4
LT
3855/* Register HCI device */
3856int hci_register_dev(struct hci_dev *hdev)
3857{
b1b813d4 3858 int id, error;
1da177e4 3859
74292d5a 3860 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
3861 return -EINVAL;
3862
08add513
MM
3863 /* Do not allow HCI_AMP devices to register at index 0,
3864 * so the index can be used as the AMP controller ID.
3865 */
3df92b31 3866 switch (hdev->dev_type) {
ca8bee5d 3867 case HCI_PRIMARY:
3df92b31
SL
3868 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3869 break;
3870 case HCI_AMP:
3871 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3872 break;
3873 default:
3874 return -EINVAL;
1da177e4 3875 }
8e87d142 3876
3df92b31
SL
3877 if (id < 0)
3878 return id;
3879
1da177e4
LT
3880 sprintf(hdev->name, "hci%d", id);
3881 hdev->id = id;
2d8b3a11
AE
3882
3883 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3884
29e2dd0d 3885 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
33ca954d
DH
3886 if (!hdev->workqueue) {
3887 error = -ENOMEM;
3888 goto err;
3889 }
f48fd9c8 3890
29e2dd0d
TH
3891 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3892 hdev->name);
6ead1bbc
JH
3893 if (!hdev->req_workqueue) {
3894 destroy_workqueue(hdev->workqueue);
3895 error = -ENOMEM;
3896 goto err;
3897 }
3898
0153e2ec
MH
3899 if (!IS_ERR_OR_NULL(bt_debugfs))
3900 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3901
bdc3e0f1
MH
3902 dev_set_name(&hdev->dev, "%s", hdev->name);
3903
3904 error = device_add(&hdev->dev);
33ca954d 3905 if (error < 0)
54506918 3906 goto err_wqueue;
1da177e4 3907
6d5d2ee6
HK
3908 hci_leds_init(hdev);
3909
611b30f7 3910 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3911 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3912 hdev);
611b30f7
MH
3913 if (hdev->rfkill) {
3914 if (rfkill_register(hdev->rfkill) < 0) {
3915 rfkill_destroy(hdev->rfkill);
3916 hdev->rfkill = NULL;
3917 }
3918 }
3919
5e130367 3920 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
a1536da2 3921 hci_dev_set_flag(hdev, HCI_RFKILLED);
5e130367 3922
a1536da2
MH
3923 hci_dev_set_flag(hdev, HCI_SETUP);
3924 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
ce2be9ac 3925
ca8bee5d 3926 if (hdev->dev_type == HCI_PRIMARY) {
56f87901
JH
3927 /* Assume BR/EDR support until proven otherwise (such as
3928 * through reading supported features during init.
3929 */
a1536da2 3930 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
56f87901 3931 }
ce2be9ac 3932
fcee3377
GP
3933 write_lock(&hci_dev_list_lock);
3934 list_add(&hdev->list, &hci_dev_list);
3935 write_unlock(&hci_dev_list_lock);
3936
4a964404
MH
3937 /* Devices that are marked for raw-only usage are unconfigured
3938 * and should not be included in normal operation.
fee746b0
MH
3939 */
3940 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
a1536da2 3941 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
fee746b0 3942
05fcd4c4 3943 hci_sock_dev_event(hdev, HCI_DEV_REG);
dc946bd8 3944 hci_dev_hold(hdev);
1da177e4 3945
219991e6
HG
3946 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3947 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3948 error = register_pm_notifier(&hdev->suspend_notifier);
3949 if (error)
3950 goto err_wqueue;
3951 }
9952d90e 3952
19202573 3953 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3954
e5e1e7fd
MC
3955 idr_init(&hdev->adv_monitors_idr);
3956
1da177e4 3957 return id;
f48fd9c8 3958
33ca954d
DH
3959err_wqueue:
3960 destroy_workqueue(hdev->workqueue);
6ead1bbc 3961 destroy_workqueue(hdev->req_workqueue);
33ca954d 3962err:
3df92b31 3963 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3964
33ca954d 3965 return error;
1da177e4
LT
3966}
3967EXPORT_SYMBOL(hci_register_dev);
3968
/* Unregister HCI device
 *
 * Reverses hci_register_dev(): removes the device from the global list,
 * stops all pending work, notifies mgmt/userspace, tears down sysfs and
 * debugfs entries and finally drops the registration reference.  The
 * teardown order matters: work items are cancelled before the device is
 * closed, and the IDA index is released only after the last reference
 * may have been dropped.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away so concurrent operations bail out */
	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	/* Save the index now: hci_dev_put() below may free hdev, but the
	 * IDA slot must be released last.
	 */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	/* Undo the PM notifier registration done in hci_register_dev() */
	if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
		hci_suspend_clear_tasks(hdev);
		unregister_pm_notifier(&hdev->suspend_notifier);
		cancel_work_sync(&hdev->suspend_prepare);
	}

	hci_dev_do_close(hdev);

	/* Only notify mgmt when the device completed its setup/config
	 * phase; otherwise userspace never saw this index.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Drop all remembered per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_dev_unlock(hdev);

	/* May free hdev; do not touch it afterwards */
	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4044
/* Suspend HCI device
 *
 * Only broadcasts a suspend event to listening HCI sockets; no controller
 * state is changed here.  Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
4052
/* Resume HCI device
 *
 * Counterpart to hci_suspend_dev(): broadcasts a resume event to listening
 * HCI sockets.  Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
4060
75e0569f
MH
4061/* Reset HCI device */
4062int hci_reset_dev(struct hci_dev *hdev)
4063{
1e4b6e91 4064 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
75e0569f
MH
4065 struct sk_buff *skb;
4066
4067 skb = bt_skb_alloc(3, GFP_ATOMIC);
4068 if (!skb)
4069 return -ENOMEM;
4070
d79f34e3 4071 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
59ae1d12 4072 skb_put_data(skb, hw_err, 3);
75e0569f
MH
4073
4074 /* Send Hardware Error to upper stack */
4075 return hci_recv_frame(hdev, skb);
4076}
4077EXPORT_SYMBOL(hci_reset_dev);
4078
76bca880 4079/* Receive frame from HCI drivers */
e1a26170 4080int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4081{
76bca880 4082 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4083 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4084 kfree_skb(skb);
4085 return -ENXIO;
4086 }
4087
d79f34e3
MH
4088 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4089 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
cc974003
MH
4090 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4091 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
fe806dce
MH
4092 kfree_skb(skb);
4093 return -EINVAL;
4094 }
4095
d82603c6 4096 /* Incoming skb */
76bca880
MH
4097 bt_cb(skb)->incoming = 1;
4098
4099 /* Time stamp */
4100 __net_timestamp(skb);
4101
76bca880 4102 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4103 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4104
76bca880
MH
4105 return 0;
4106}
4107EXPORT_SYMBOL(hci_recv_frame);
4108
e875ff84
MH
/* Receive diagnostic message from HCI drivers
 *
 * Queues a driver-provided diagnostic packet on the RX queue; ownership
 * of @skb passes to the queue.  Always returns 0.
 */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
4124
5177a838
MH
/* Record a printf-formatted hardware description string for @hdev.
 * Any previously set string is released first; the new string is
 * allocated (or interned) via kvasprintf_const().
 */
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);
4135
/* Record a printf-formatted firmware description string for @hdev.
 * Mirrors hci_set_hw_info(): frees any previous string before storing
 * the newly formatted one.
 */
void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
4146
1da177e4
LT
4147/* ---- Interface to upper protocols ---- */
4148
1da177e4
LT
/* Register an upper-protocol callback structure on the global hci_cb
 * list, protected by hci_cb_list_lock.  Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
4160
/* Remove a previously registered callback structure from the global
 * hci_cb list.  Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4172
/* Hand one outgoing frame to the HCI driver.
 *
 * Copies are delivered to the monitor and (in promiscuous mode) to raw
 * sockets BEFORE checking HCI_RUNNING, so tracing still sees frames that
 * end up being dropped.  Consumes @skb in every path.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Drop the frame once the transport is no longer running */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
	}
}
4205
/* Send HCI command
 *
 * Builds a command skb for @opcode with @plen bytes of @param payload and
 * queues it on the command queue; the cmd_work worker actually transmits
 * it.  Returns 0 on success or -ENOMEM if the skb cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 4230
d6ee6ad7
LP
/* Send a vendor-specific command directly to the driver without waiting
 * for (or expecting) any completion event.  Only OGF 0x3f (vendor)
 * commands are allowed; everything else must go through hci_send_cmd()
 * or the sync helpers.  Returns 0 on success or a negative errno.
 */
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	/* Bypass the command queue: send the frame immediately */
	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
4261
/* Get data from the previously sent command
 *
 * Returns a pointer to the parameter bytes of the last sent command if it
 * matches @opcode, or NULL when nothing was sent or the opcode differs.
 * The returned pointer aliases hdev->sent_cmd and must not outlive it.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	/* Skip the command header; return just the parameters */
	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
4279
fbef168f
LP
/* Send HCI command and wait for command complete event
 *
 * Serialized via the request sync lock.  Returns the event skb on
 * success or an ERR_PTR (e.g. -ENETDOWN when the device is not up).
 */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
4298
1da177e4
LT
/* Send ACL data */
/* Prepend an ACL header (packed handle+flags and data length) to @skb.
 * The length field is captured before skb_push() grows the skb.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
4311
/* Queue one ACL packet (possibly carrying a frag_list of continuation
 * fragments) on @queue with proper ACL headers.
 *
 * Primary controllers use the connection handle, AMP controllers the
 * channel handle.  For fragmented skbs all fragments are queued under one
 * lock hold so the TX scheduler can never interleave another packet
 * between them.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments are handled
	 * individually below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT instead of ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
4373
/* Queue ACL data on the channel's data queue and kick the TX worker. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
4384
/* Send SCO data
 *
 * Prepends a SCO header (handle + length) to @skb, queues it on the
 * connection's data queue and schedules the TX worker.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
4405
4406/* ---- HCI TX task (outgoing data) ---- */
4407
/* HCI Connection scheduler
 *
 * Pick the connection of @type with queued data and the lowest in-flight
 * packet count (round-robin fairness).  *quote is set to this
 * connection's fair share of the relevant controller buffer budget
 * (minimum 1), or 0 when nothing is ready.  Returns the chosen
 * connection or NULL.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-recently-served connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL credits.
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			bt_dev_err(hdev, "unknown link type %d", conn->type);
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4468
/* TX timeout handler: disconnect every connection of @type that still
 * has unacknowledged packets outstanding, since the controller appears
 * to have stopped returning completed-packets events for them.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
4489
/* Channel-level scheduler: among all channels on connections of @type,
 * pick one whose head skb has the highest priority, breaking ties by the
 * lowest per-connection in-flight count.  *quote receives the channel's
 * fair share of the controller buffer budget (minimum 1).  Returns NULL
 * when no channel has queued data.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts the fairness
			 * bookkeeping at that priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4571
/* Anti-starvation pass after a scheduling round: reset the per-round
 * sent counter of channels that got service, and promote the head skb of
 * channels that did not to just below HCI_PRIO_MAX so they win the next
 * round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was served this round: clear its counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: boost its head packet */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4621
/* Number of controller data blocks consumed by one ACL packet (payload
 * only; the ACL header does not count against the block budget).
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
4627
/* Detect a stalled ACL TX path: if no buffer credits are left and
 * nothing was transmitted for longer than HCI_ACL_TX_TIMEOUT, kill the
 * stalled links.  Skipped for unconfigured (raw-only) devices.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 4638
/* Schedule SCO */
/* Drain queued SCO data while controller SCO credits are available,
 * serving connections fairly via hci_low_sent().
 */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			/* Wrap the in-flight counter instead of overflowing */
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4662
/* Drain queued eSCO data; identical strategy to hci_sched_sco() but for
 * ESCO_LINK connections (they share the SCO credit pool).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			/* Wrap the in-flight counter instead of overflowing */
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4686
/* Packet-based ACL scheduler: transmit queued ACL packets while credits
 * remain, honoring per-channel priority, and interleave SCO/eSCO traffic
 * after every packet to keep audio latency low.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Anything sent? Re-balance priorities for the next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4728
/* Block-based ACL scheduler (AMP-style flow control): like
 * hci_sched_acl_pkt() but accounts controller buffer usage in data
 * blocks rather than whole packets.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP links; everything else is ACL */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* A packet larger than the remaining budget ends the
			 * whole scheduling pass.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4782
/* Dispatch ACL scheduling to the packet- or block-based variant
 * depending on the controller's flow-control mode.
 */
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
4805
/* LE link scheduler: transmit queued LE data while credits remain.
 * Controllers without a dedicated LE buffer pool (le_pkts == 0) borrow
 * ACL credits, which is why the updated count is written back to either
 * le_cnt or acl_cnt at the end.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Write the remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4855
/* TX worker: run all per-link-type schedulers (skipped while userspace
 * holds the device via HCI_USER_CHANNEL) and then flush raw packets,
 * which bypass scheduling entirely.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4876
25985edc 4877/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4878
/* ACL data packet */
/* RX path for ACL data: strip the ACL header, look up the connection by
 * handle and pass the payload to L2CAP.  Packets for unknown handles are
 * logged and dropped.  Consumes @skb in all paths.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both the handle and the PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}
4914
/* SCO data packet */
/* RX path for SCO data: strip the SCO header, look up the connection by
 * handle, record the packet status flags and hand the payload to the SCO
 * layer.  Packets for unknown handles are logged and dropped.  Consumes
 * @skb in all paths.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		bt_cb(skb)->sco.pkt_status = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}
4949
9238f36a
JH
4950static bool hci_req_is_complete(struct hci_dev *hdev)
4951{
4952 struct sk_buff *skb;
4953
4954 skb = skb_peek(&hdev->cmd_q);
4955 if (!skb)
4956 return true;
4957
44d27137 4958 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
9238f36a
JH
4959}
4960
/* Re-queue a clone of the last sent command at the head of the command
 * queue.  Used to recover from controllers that emit a spontaneous reset
 * complete event, which would otherwise swallow the pending command.
 * HCI_OP_RESET itself is never resent.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
4982
/* Handle completion bookkeeping for the command identified by @opcode
 * with result @status.  On request completion, hands back the request's
 * completion callback through *req_complete or *req_complete_skb (only
 * one is set); otherwise leaves both untouched.  On failure it also
 * flushes the remaining queued commands of the aborted request.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at the first command of the next request and put it
		 * back on the queue.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
5047
/* RX worker: drain the RX queue, feeding copies to the monitor and raw
 * sockets, filtering by device state, and dispatching each frame to the
 * matching packet handler.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * the userspace has exclusive access to device.
		 * When device is HCI_INIT, we still need to process
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unhandled types (including ISO data) are dropped */
			kfree_skb(skb);
			break;
		}
	}
}
5110
c347b765 5111static void hci_cmd_work(struct work_struct *work)
1da177e4 5112{
c347b765 5113 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5114 struct sk_buff *skb;
5115
2104786b
AE
5116 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5117 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5118
1da177e4 5119 /* Send queued commands */
5a08ecce
AE
5120 if (atomic_read(&hdev->cmd_cnt)) {
5121 skb = skb_dequeue(&hdev->cmd_q);
5122 if (!skb)
5123 return;
5124
7585b97a 5125 kfree_skb(hdev->sent_cmd);
1da177e4 5126
a675d7f1 5127 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5128 if (hdev->sent_cmd) {
f80c5dad
JPRV
5129 if (hci_req_status_pend(hdev))
5130 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
1da177e4 5131 atomic_dec(&hdev->cmd_cnt);
57d17d70 5132 hci_send_frame(hdev, skb);
7bdb8a5c 5133 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5134 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5135 else
65cc2b49
MH
5136 schedule_delayed_work(&hdev->cmd_timer,
5137 HCI_CMD_TIMEOUT);
1da177e4
LT
5138 } else {
5139 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5140 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5141 }
5142 }
5143}