/* net/bluetooth/hci_core.c */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

8c520a59 28#include <linux/export.h>
8c520a59 29#include <linux/rfkill.h>
baf27f6e 30#include <linux/debugfs.h>
99780a7b 31#include <linux/crypto.h>
7a0e5b15 32#include <linux/property.h>
9952d90e
APS
33#include <linux/suspend.h>
34#include <linux/wait.h>
47219839 35#include <asm/unaligned.h>
1da177e4
LT
36
37#include <net/bluetooth/bluetooth.h>
38#include <net/bluetooth/hci_core.h>
4bc58f51 39#include <net/bluetooth/l2cap.h>
af58925c 40#include <net/bluetooth/mgmt.h>
1da177e4 41
0857dd3b 42#include "hci_request.h"
60c5f5fb 43#include "hci_debugfs.h"
970c4e46 44#include "smp.h"
6d5d2ee6 45#include "leds.h"
145373cb 46#include "msft.h"
f67743f9 47#include "aosp.h"
970c4e46 48
b78752cc 49static void hci_rx_work(struct work_struct *work);
c347b765 50static void hci_cmd_work(struct work_struct *work);
3eff45ea 51static void hci_tx_work(struct work_struct *work);
1da177e4 52
1da177e4
LT
53/* HCI device list */
54LIST_HEAD(hci_dev_list);
55DEFINE_RWLOCK(hci_dev_list_lock);
56
57/* HCI callback list */
58LIST_HEAD(hci_cb_list);
fba7ecf0 59DEFINE_MUTEX(hci_cb_list_lock);
1da177e4 60
3df92b31
SL
61/* HCI ID Numbering */
62static DEFINE_IDA(hci_index_ida);
63
baf27f6e
MH
64/* ---- HCI debugfs entries ---- */
65
4b4148e9
MH
66static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
68{
69 struct hci_dev *hdev = file->private_data;
70 char buf[3];
71
74b93e9f 72 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
4b4148e9
MH
73 buf[1] = '\n';
74 buf[2] = '\0';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
76}
77
78static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
80{
81 struct hci_dev *hdev = file->private_data;
82 struct sk_buff *skb;
4b4148e9 83 bool enable;
3bf5e97d 84 int err;
4b4148e9
MH
85
86 if (!test_bit(HCI_UP, &hdev->flags))
87 return -ENETDOWN;
88
3bf5e97d
AS
89 err = kstrtobool_from_user(user_buf, count, &enable);
90 if (err)
91 return err;
4b4148e9 92
b7cb93e5 93 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
4b4148e9
MH
94 return -EALREADY;
95
b504430c 96 hci_req_sync_lock(hdev);
4b4148e9
MH
97 if (enable)
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
99 HCI_CMD_TIMEOUT);
100 else
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102 HCI_CMD_TIMEOUT);
b504430c 103 hci_req_sync_unlock(hdev);
4b4148e9
MH
104
105 if (IS_ERR(skb))
106 return PTR_ERR(skb);
107
4b4148e9
MH
108 kfree_skb(skb);
109
b7cb93e5 110 hci_dev_change_flag(hdev, HCI_DUT_MODE);
4b4148e9
MH
111
112 return count;
113}
114
115static const struct file_operations dut_mode_fops = {
116 .open = simple_open,
117 .read = dut_mode_read,
118 .write = dut_mode_write,
119 .llseek = default_llseek,
120};
121
4b4113d6
MH
122static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos)
124{
125 struct hci_dev *hdev = file->private_data;
126 char buf[3];
127
74b93e9f 128 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
4b4113d6
MH
129 buf[1] = '\n';
130 buf[2] = '\0';
131 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
132}
133
134static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
135 size_t count, loff_t *ppos)
136{
137 struct hci_dev *hdev = file->private_data;
4b4113d6
MH
138 bool enable;
139 int err;
140
3bf5e97d
AS
141 err = kstrtobool_from_user(user_buf, count, &enable);
142 if (err)
143 return err;
4b4113d6 144
7e995b9e 145 /* When the diagnostic flags are not persistent and the transport
b56c7b25
MH
146 * is not active or in user channel operation, then there is no need
147 * for the vendor callback. Instead just store the desired value and
148 * the setting will be programmed when the controller gets powered on.
7e995b9e
MH
149 */
150 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
b56c7b25
MH
151 (!test_bit(HCI_RUNNING, &hdev->flags) ||
152 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
7e995b9e
MH
153 goto done;
154
b504430c 155 hci_req_sync_lock(hdev);
4b4113d6 156 err = hdev->set_diag(hdev, enable);
b504430c 157 hci_req_sync_unlock(hdev);
4b4113d6
MH
158
159 if (err < 0)
160 return err;
161
7e995b9e 162done:
4b4113d6
MH
163 if (enable)
164 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
165 else
166 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
167
168 return count;
169}
170
171static const struct file_operations vendor_diag_fops = {
172 .open = simple_open,
173 .read = vendor_diag_read,
174 .write = vendor_diag_write,
175 .llseek = default_llseek,
176};
177
f640ee98
MH
178static void hci_debugfs_create_basic(struct hci_dev *hdev)
179{
180 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
181 &dut_mode_fops);
182
183 if (hdev->set_diag)
184 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
185 &vendor_diag_fops);
186}
187
a1d01db1 188static int hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 189{
42c6b129 190 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
191
192 /* Reset device */
42c6b129
JH
193 set_bit(HCI_RESET, &req->hdev->flags);
194 hci_req_add(req, HCI_OP_RESET, 0, NULL);
a1d01db1 195 return 0;
1da177e4
LT
196}
197
42c6b129 198static void bredr_init(struct hci_request *req)
1da177e4 199{
42c6b129 200 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 201
1da177e4 202 /* Read Local Supported Features */
42c6b129 203 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 204
1143e5a6 205 /* Read Local Version */
42c6b129 206 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
207
208 /* Read BD Address */
42c6b129 209 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
210}
211
0af801b9 212static void amp_init1(struct hci_request *req)
e61ef499 213{
42c6b129 214 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 215
e61ef499 216 /* Read Local Version */
42c6b129 217 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489 218
f6996cfe
MH
219 /* Read Local Supported Commands */
220 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
221
6bcbc489 222 /* Read Local AMP Info */
42c6b129 223 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
224
225 /* Read Data Blk size */
42c6b129 226 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
7528ca1c 227
f38ba941
MH
228 /* Read Flow Control Mode */
229 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
230
7528ca1c
MH
231 /* Read Location Data */
232 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
e61ef499
AE
233}
234
a1d01db1 235static int amp_init2(struct hci_request *req)
0af801b9
JH
236{
237 /* Read Local Supported Features. Not all AMP controllers
238 * support this so it's placed conditionally in the second
239 * stage init.
240 */
241 if (req->hdev->commands[14] & 0x20)
242 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
a1d01db1
JH
243
244 return 0;
0af801b9
JH
245}
246
a1d01db1 247static int hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 248{
42c6b129 249 struct hci_dev *hdev = req->hdev;
e61ef499
AE
250
251 BT_DBG("%s %ld", hdev->name, opt);
252
11778716
AE
253 /* Reset */
254 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 255 hci_reset_req(req, 0);
11778716 256
e61ef499 257 switch (hdev->dev_type) {
ca8bee5d 258 case HCI_PRIMARY:
42c6b129 259 bredr_init(req);
e61ef499 260 break;
e61ef499 261 case HCI_AMP:
0af801b9 262 amp_init1(req);
e61ef499 263 break;
e61ef499 264 default:
2064ee33 265 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
e61ef499
AE
266 break;
267 }
a1d01db1
JH
268
269 return 0;
e61ef499
AE
270}
271
42c6b129 272static void bredr_setup(struct hci_request *req)
2177bab5 273{
2177bab5
JH
274 __le16 param;
275 __u8 flt_type;
276
277 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 278 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
279
280 /* Read Class of Device */
42c6b129 281 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
282
283 /* Read Local Name */
42c6b129 284 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
285
286 /* Read Voice Setting */
42c6b129 287 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5 288
b4cb9fb2
MH
289 /* Read Number of Supported IAC */
290 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
291
4b836f39
MH
292 /* Read Current IAC LAP */
293 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
294
2177bab5
JH
295 /* Clear Event Filters */
296 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 297 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
298
299 /* Connection accept timeout ~20 secs */
dcf4adbf 300 param = cpu_to_le16(0x7d00);
42c6b129 301 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5
JH
302}
303
42c6b129 304static void le_setup(struct hci_request *req)
2177bab5 305{
c73eee91
JH
306 struct hci_dev *hdev = req->hdev;
307
2177bab5 308 /* Read LE Buffer Size */
42c6b129 309 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
310
311 /* Read LE Local Supported Features */
42c6b129 312 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
2177bab5 313
747d3f03
MH
314 /* Read LE Supported States */
315 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
316
c73eee91
JH
317 /* LE-only controllers have LE implicitly enabled */
318 if (!lmp_bredr_capable(hdev))
a1536da2 319 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2177bab5
JH
320}
321
42c6b129 322static void hci_setup_event_mask(struct hci_request *req)
2177bab5 323{
42c6b129
JH
324 struct hci_dev *hdev = req->hdev;
325
2177bab5
JH
326 /* The second byte is 0xff instead of 0x9f (two reserved bits
327 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
328 * command otherwise.
329 */
330 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
331
332 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
333 * any event mask for pre 1.2 devices.
334 */
335 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
336 return;
337
338 if (lmp_bredr_capable(hdev)) {
339 events[4] |= 0x01; /* Flow Specification Complete */
c7882cbd
MH
340 } else {
341 /* Use a different default for LE-only devices */
342 memset(events, 0, sizeof(events));
c7882cbd
MH
343 events[1] |= 0x20; /* Command Complete */
344 events[1] |= 0x40; /* Command Status */
345 events[1] |= 0x80; /* Hardware Error */
5c3d3b4c
MH
346
347 /* If the controller supports the Disconnect command, enable
348 * the corresponding event. In addition enable packet flow
349 * control related events.
350 */
351 if (hdev->commands[0] & 0x20) {
352 events[0] |= 0x10; /* Disconnection Complete */
353 events[2] |= 0x04; /* Number of Completed Packets */
354 events[3] |= 0x02; /* Data Buffer Overflow */
355 }
356
357 /* If the controller supports the Read Remote Version
358 * Information command, enable the corresponding event.
359 */
360 if (hdev->commands[2] & 0x80)
361 events[1] |= 0x08; /* Read Remote Version Information
362 * Complete
363 */
0da71f1b
MH
364
365 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
366 events[0] |= 0x80; /* Encryption Change */
367 events[5] |= 0x80; /* Encryption Key Refresh Complete */
368 }
2177bab5
JH
369 }
370
9fe759ce
MH
371 if (lmp_inq_rssi_capable(hdev) ||
372 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
2177bab5
JH
373 events[4] |= 0x02; /* Inquiry Result with RSSI */
374
70f56aa2
MH
375 if (lmp_ext_feat_capable(hdev))
376 events[4] |= 0x04; /* Read Remote Extended Features Complete */
377
378 if (lmp_esco_capable(hdev)) {
379 events[5] |= 0x08; /* Synchronous Connection Complete */
380 events[5] |= 0x10; /* Synchronous Connection Changed */
381 }
382
2177bab5
JH
383 if (lmp_sniffsubr_capable(hdev))
384 events[5] |= 0x20; /* Sniff Subrating */
385
386 if (lmp_pause_enc_capable(hdev))
387 events[5] |= 0x80; /* Encryption Key Refresh Complete */
388
389 if (lmp_ext_inq_capable(hdev))
390 events[5] |= 0x40; /* Extended Inquiry Result */
391
392 if (lmp_no_flush_capable(hdev))
393 events[7] |= 0x01; /* Enhanced Flush Complete */
394
395 if (lmp_lsto_capable(hdev))
396 events[6] |= 0x80; /* Link Supervision Timeout Changed */
397
398 if (lmp_ssp_capable(hdev)) {
399 events[6] |= 0x01; /* IO Capability Request */
400 events[6] |= 0x02; /* IO Capability Response */
401 events[6] |= 0x04; /* User Confirmation Request */
402 events[6] |= 0x08; /* User Passkey Request */
403 events[6] |= 0x10; /* Remote OOB Data Request */
404 events[6] |= 0x20; /* Simple Pairing Complete */
405 events[7] |= 0x04; /* User Passkey Notification */
406 events[7] |= 0x08; /* Keypress Notification */
407 events[7] |= 0x10; /* Remote Host Supported
408 * Features Notification
409 */
410 }
411
412 if (lmp_le_capable(hdev))
413 events[7] |= 0x20; /* LE Meta-Event */
414
42c6b129 415 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
2177bab5
JH
416}
417
a1d01db1 418static int hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 419{
42c6b129
JH
420 struct hci_dev *hdev = req->hdev;
421
0af801b9
JH
422 if (hdev->dev_type == HCI_AMP)
423 return amp_init2(req);
424
2177bab5 425 if (lmp_bredr_capable(hdev))
42c6b129 426 bredr_setup(req);
56f87901 427 else
a358dc11 428 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
2177bab5
JH
429
430 if (lmp_le_capable(hdev))
42c6b129 431 le_setup(req);
2177bab5 432
0f3adeae
MH
433 /* All Bluetooth 1.2 and later controllers should support the
434 * HCI command for reading the local supported commands.
435 *
436 * Unfortunately some controllers indicate Bluetooth 1.2 support,
437 * but do not have support for this command. If that is the case,
438 * the driver can quirk the behavior and skip reading the local
439 * supported commands.
3f8e2d75 440 */
0f3adeae
MH
441 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
442 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
42c6b129 443 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
444
445 if (lmp_ssp_capable(hdev)) {
57af75a8
MH
446 /* When SSP is available, then the host features page
447 * should also be available as well. However some
448 * controllers list the max_page as 0 as long as SSP
449 * has not been enabled. To achieve proper debugging
450 * output, force the minimum max_page to 1 at least.
451 */
452 hdev->max_page = 0x01;
453
d7a5a11d 454 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2177bab5 455 u8 mode = 0x01;
574ea3c7 456
42c6b129
JH
457 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
458 sizeof(mode), &mode);
2177bab5
JH
459 } else {
460 struct hci_cp_write_eir cp;
461
462 memset(hdev->eir, 0, sizeof(hdev->eir));
463 memset(&cp, 0, sizeof(cp));
464
42c6b129 465 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
466 }
467 }
468
043ec9bf
MH
469 if (lmp_inq_rssi_capable(hdev) ||
470 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
04422da9
MH
471 u8 mode;
472
473 /* If Extended Inquiry Result events are supported, then
474 * they are clearly preferred over Inquiry Result with RSSI
475 * events.
476 */
477 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
478
479 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
480 }
2177bab5
JH
481
482 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 483 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
484
485 if (lmp_ext_feat_capable(hdev)) {
486 struct hci_cp_read_local_ext_features cp;
487
488 cp.page = 0x01;
42c6b129
JH
489 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
490 sizeof(cp), &cp);
2177bab5
JH
491 }
492
d7a5a11d 493 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2177bab5 494 u8 enable = 1;
42c6b129
JH
495 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
496 &enable);
2177bab5 497 }
a1d01db1
JH
498
499 return 0;
2177bab5
JH
500}
501
42c6b129 502static void hci_setup_link_policy(struct hci_request *req)
2177bab5 503{
42c6b129 504 struct hci_dev *hdev = req->hdev;
2177bab5
JH
505 struct hci_cp_write_def_link_policy cp;
506 u16 link_policy = 0;
507
508 if (lmp_rswitch_capable(hdev))
509 link_policy |= HCI_LP_RSWITCH;
510 if (lmp_hold_capable(hdev))
511 link_policy |= HCI_LP_HOLD;
512 if (lmp_sniff_capable(hdev))
513 link_policy |= HCI_LP_SNIFF;
514 if (lmp_park_capable(hdev))
515 link_policy |= HCI_LP_PARK;
516
517 cp.policy = cpu_to_le16(link_policy);
42c6b129 518 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
519}
520
42c6b129 521static void hci_set_le_support(struct hci_request *req)
2177bab5 522{
42c6b129 523 struct hci_dev *hdev = req->hdev;
2177bab5
JH
524 struct hci_cp_write_le_host_supported cp;
525
c73eee91
JH
526 /* LE-only devices do not support explicit enablement */
527 if (!lmp_bredr_capable(hdev))
528 return;
529
2177bab5
JH
530 memset(&cp, 0, sizeof(cp));
531
d7a5a11d 532 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2177bab5 533 cp.le = 0x01;
32226e4f 534 cp.simul = 0x00;
2177bab5
JH
535 }
536
537 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
538 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
539 &cp);
2177bab5
JH
540}
541
d62e6d67
JH
542static void hci_set_event_mask_page_2(struct hci_request *req)
543{
544 struct hci_dev *hdev = req->hdev;
545 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
313f6888 546 bool changed = false;
d62e6d67
JH
547
548 /* If Connectionless Slave Broadcast master role is supported
549 * enable all necessary events for it.
550 */
53b834d2 551 if (lmp_csb_master_capable(hdev)) {
d62e6d67
JH
552 events[1] |= 0x40; /* Triggered Clock Capture */
553 events[1] |= 0x80; /* Synchronization Train Complete */
554 events[2] |= 0x10; /* Slave Page Response Timeout */
555 events[2] |= 0x20; /* CSB Channel Map Change */
313f6888 556 changed = true;
d62e6d67
JH
557 }
558
559 /* If Connectionless Slave Broadcast slave role is supported
560 * enable all necessary events for it.
561 */
53b834d2 562 if (lmp_csb_slave_capable(hdev)) {
d62e6d67
JH
563 events[2] |= 0x01; /* Synchronization Train Received */
564 events[2] |= 0x02; /* CSB Receive */
565 events[2] |= 0x04; /* CSB Timeout */
566 events[2] |= 0x08; /* Truncated Page Complete */
313f6888 567 changed = true;
d62e6d67
JH
568 }
569
40c59fcb 570 /* Enable Authenticated Payload Timeout Expired event if supported */
313f6888 571 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
40c59fcb 572 events[2] |= 0x80;
313f6888
MH
573 changed = true;
574 }
40c59fcb 575
313f6888
MH
576 /* Some Broadcom based controllers indicate support for Set Event
577 * Mask Page 2 command, but then actually do not support it. Since
578 * the default value is all bits set to zero, the command is only
579 * required if the event mask has to be changed. In case no change
580 * to the event mask is needed, skip this command.
581 */
582 if (changed)
583 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
584 sizeof(events), events);
d62e6d67
JH
585}
586
a1d01db1 587static int hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 588{
42c6b129 589 struct hci_dev *hdev = req->hdev;
d2c5d77f 590 u8 p;
42c6b129 591
0da71f1b
MH
592 hci_setup_event_mask(req);
593
e81be90b
JH
594 if (hdev->commands[6] & 0x20 &&
595 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
48ce62c4
MH
596 struct hci_cp_read_stored_link_key cp;
597
598 bacpy(&cp.bdaddr, BDADDR_ANY);
599 cp.read_all = 0x01;
600 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
601 }
602
2177bab5 603 if (hdev->commands[5] & 0x10)
42c6b129 604 hci_setup_link_policy(req);
2177bab5 605
417287de
MH
606 if (hdev->commands[8] & 0x01)
607 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
608
cde1a8a9
IFM
609 if (hdev->commands[18] & 0x04 &&
610 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
00bce3fb
AM
611 hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
612
417287de
MH
613 /* Some older Broadcom based Bluetooth 1.2 controllers do not
614 * support the Read Page Scan Type command. Check support for
615 * this command in the bit mask of supported commands.
616 */
617 if (hdev->commands[13] & 0x01)
618 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
619
9193c6e8
AG
620 if (lmp_le_capable(hdev)) {
621 u8 events[8];
622
623 memset(events, 0, sizeof(events));
4d6c705b
MH
624
625 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
626 events[0] |= 0x10; /* LE Long Term Key Request */
662bc2e6
AG
627
628 /* If controller supports the Connection Parameters Request
629 * Link Layer Procedure, enable the corresponding event.
630 */
631 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
632 events[0] |= 0x20; /* LE Remote Connection
633 * Parameter Request
634 */
635
a9f6068e
MH
636 /* If the controller supports the Data Length Extension
637 * feature, enable the corresponding event.
638 */
639 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
640 events[0] |= 0x40; /* LE Data Length Change */
641
ff3b8df2
MH
642 /* If the controller supports LL Privacy feature, enable
643 * the corresponding event.
644 */
645 if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
646 events[1] |= 0x02; /* LE Enhanced Connection
647 * Complete
648 */
649
4b71bba4 650 /* If the controller supports Extended Scanner Filter
91641b79 651 * Policies, enable the corresponding event.
4b71bba4
MH
652 */
653 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
654 events[1] |= 0x04; /* LE Direct Advertising
655 * Report
656 */
657
9756d33b
MH
658 /* If the controller supports Channel Selection Algorithm #2
659 * feature, enable the corresponding event.
660 */
661 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
662 events[2] |= 0x08; /* LE Channel Selection
663 * Algorithm
664 */
665
7d26f5c4
MH
666 /* If the controller supports the LE Set Scan Enable command,
667 * enable the corresponding advertising report event.
668 */
669 if (hdev->commands[26] & 0x08)
670 events[0] |= 0x02; /* LE Advertising Report */
671
672 /* If the controller supports the LE Create Connection
673 * command, enable the corresponding event.
674 */
675 if (hdev->commands[26] & 0x10)
676 events[0] |= 0x01; /* LE Connection Complete */
677
678 /* If the controller supports the LE Connection Update
679 * command, enable the corresponding event.
680 */
681 if (hdev->commands[27] & 0x04)
682 events[0] |= 0x04; /* LE Connection Update
683 * Complete
684 */
685
686 /* If the controller supports the LE Read Remote Used Features
687 * command, enable the corresponding event.
688 */
689 if (hdev->commands[27] & 0x20)
690 events[0] |= 0x08; /* LE Read Remote Used
691 * Features Complete
692 */
693
5a34bd5f
MH
694 /* If the controller supports the LE Read Local P-256
695 * Public Key command, enable the corresponding event.
696 */
697 if (hdev->commands[34] & 0x02)
698 events[0] |= 0x80; /* LE Read Local P-256
699 * Public Key Complete
700 */
701
702 /* If the controller supports the LE Generate DHKey
703 * command, enable the corresponding event.
704 */
705 if (hdev->commands[34] & 0x04)
706 events[1] |= 0x01; /* LE Generate DHKey Complete */
707
27bbca44
MH
708 /* If the controller supports the LE Set Default PHY or
709 * LE Set PHY commands, enable the corresponding event.
710 */
711 if (hdev->commands[35] & (0x20 | 0x40))
712 events[1] |= 0x08; /* LE PHY Update Complete */
713
c215e939
JK
714 /* If the controller supports LE Set Extended Scan Parameters
715 * and LE Set Extended Scan Enable commands, enable the
716 * corresponding event.
717 */
718 if (use_ext_scan(hdev))
719 events[1] |= 0x10; /* LE Extended Advertising
720 * Report
721 */
722
acf0aeae
JK
723 /* If the controller supports the LE Extended Advertising
724 * command, enable the corresponding event.
725 */
726 if (ext_adv_capable(hdev))
727 events[2] |= 0x02; /* LE Advertising Set
728 * Terminated
729 */
730
9193c6e8
AG
731 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
732 events);
733
6b49bcb4
JK
734 /* Read LE Advertising Channel TX Power */
735 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
736 /* HCI TS spec forbids mixing of legacy and extended
737 * advertising commands wherein READ_ADV_TX_POWER is
738 * also included. So do not call it if extended adv
739 * is supported otherwise controller will return
740 * COMMAND_DISALLOWED for extended commands.
741 */
15a49cca
MH
742 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
743 }
744
7c395ea5
DW
745 if (hdev->commands[38] & 0x80) {
746 /* Read LE Min/Max Tx Power*/
747 hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
748 0, NULL);
749 }
750
2ab216a7
MH
751 if (hdev->commands[26] & 0x40) {
752 /* Read LE White List Size */
753 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
754 0, NULL);
755 }
756
757 if (hdev->commands[26] & 0x80) {
758 /* Clear LE White List */
759 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
760 }
761
cfdb0c2d
AN
762 if (hdev->commands[34] & 0x40) {
763 /* Read LE Resolving List Size */
764 hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
765 0, NULL);
766 }
767
545f2596
AN
768 if (hdev->commands[34] & 0x20) {
769 /* Clear LE Resolving List */
770 hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
771 }
772
a31489d2 773 if (hdev->commands[35] & 0x04) {
b2cc2339
SN
774 __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
775
776 /* Set RPA timeout */
777 hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
778 &rpa_timeout);
779 }
780
a9f6068e
MH
781 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
782 /* Read LE Maximum Data Length */
783 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
784
785 /* Read LE Suggested Default Data Length */
786 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
787 }
788
6b49bcb4
JK
789 if (ext_adv_capable(hdev)) {
790 /* Read LE Number of Supported Advertising Sets */
791 hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
792 0, NULL);
793 }
794
42c6b129 795 hci_set_le_support(req);
9193c6e8 796 }
d2c5d77f
JH
797
798 /* Read features beyond page 1 if available */
799 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
800 struct hci_cp_read_local_ext_features cp;
801
802 cp.page = p;
803 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
804 sizeof(cp), &cp);
805 }
a1d01db1
JH
806
807 return 0;
2177bab5
JH
808}
809
a1d01db1 810static int hci_init4_req(struct hci_request *req, unsigned long opt)
5d4e7e8d
JH
811{
812 struct hci_dev *hdev = req->hdev;
813
36f260ce
MH
814 /* Some Broadcom based Bluetooth controllers do not support the
815 * Delete Stored Link Key command. They are clearly indicating its
816 * absence in the bit mask of supported commands.
817 *
bb6d6895 818 * Check the supported commands and only if the command is marked
36f260ce
MH
819 * as supported send it. If not supported assume that the controller
820 * does not have actual support for stored link keys which makes this
821 * command redundant anyway.
822 *
823 * Some controllers indicate that they support handling deleting
824 * stored link keys, but they don't. The quirk lets a driver
825 * just disable this command.
826 */
827 if (hdev->commands[6] & 0x80 &&
828 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
829 struct hci_cp_delete_stored_link_key cp;
830
831 bacpy(&cp.bdaddr, BDADDR_ANY);
832 cp.delete_all = 0x01;
833 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
834 sizeof(cp), &cp);
835 }
836
d62e6d67
JH
837 /* Set event mask page 2 if the HCI command for it is supported */
838 if (hdev->commands[22] & 0x04)
839 hci_set_event_mask_page_2(req);
840
109e3191
MH
841 /* Read local codec list if the HCI command is supported */
842 if (hdev->commands[29] & 0x20)
843 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
844
a4790360
MH
845 /* Read local pairing options if the HCI command is supported */
846 if (hdev->commands[41] & 0x08)
847 hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);
848
f4fe73ed
MH
849 /* Get MWS transport configuration if the HCI command is supported */
850 if (hdev->commands[30] & 0x08)
851 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
852
5d4e7e8d 853 /* Check for Synchronization Train support */
53b834d2 854 if (lmp_sync_train_capable(hdev))
5d4e7e8d 855 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
a6d0d690
MH
856
857 /* Enable Secure Connections if supported and configured */
d7a5a11d 858 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
574ea3c7 859 bredr_sc_enabled(hdev)) {
a6d0d690 860 u8 support = 0x01;
574ea3c7 861
a6d0d690
MH
862 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
863 sizeof(support), &support);
864 }
a1d01db1 865
00bce3fb
AM
866 /* Set erroneous data reporting if supported to the wideband speech
867 * setting value
868 */
cde1a8a9
IFM
869 if (hdev->commands[18] & 0x08 &&
870 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
00bce3fb
AM
871 bool enabled = hci_dev_test_flag(hdev,
872 HCI_WIDEBAND_SPEECH_ENABLED);
873
874 if (enabled !=
875 (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
876 struct hci_cp_write_def_err_data_reporting cp;
877
878 cp.err_data_reporting = enabled ?
879 ERR_DATA_REPORTING_ENABLED :
880 ERR_DATA_REPORTING_DISABLED;
881
882 hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
883 sizeof(cp), &cp);
884 }
885 }
886
12204875
MH
887 /* Set Suggested Default Data Length to maximum if supported */
888 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
889 struct hci_cp_le_write_def_data_len cp;
890
727ea61a
BDC
891 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
892 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
12204875
MH
893 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
894 }
895
de2ba303
MH
896 /* Set Default PHY parameters if command is supported */
897 if (hdev->commands[35] & 0x20) {
898 struct hci_cp_le_set_default_phy cp;
899
6decb5b4
JK
900 cp.all_phys = 0x00;
901 cp.tx_phys = hdev->le_tx_def_phys;
902 cp.rx_phys = hdev->le_rx_def_phys;
de2ba303
MH
903
904 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
905 }
906
a1d01db1 907 return 0;
5d4e7e8d
JH
908}
909
2177bab5
JH
910static int __hci_init(struct hci_dev *hdev)
911{
912 int err;
913
4ebeee2d 914 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
2177bab5
JH
915 if (err < 0)
916 return err;
917
f640ee98
MH
918 if (hci_dev_test_flag(hdev, HCI_SETUP))
919 hci_debugfs_create_basic(hdev);
4b4148e9 920
4ebeee2d 921 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
0af801b9
JH
922 if (err < 0)
923 return err;
924
ca8bee5d 925 /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
2177bab5 926 * BR/EDR/LE type controllers. AMP controllers only need the
0af801b9 927 * first two stages of init.
2177bab5 928 */
ca8bee5d 929 if (hdev->dev_type != HCI_PRIMARY)
2177bab5
JH
930 return 0;
931
4ebeee2d 932 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
5d4e7e8d
JH
933 if (err < 0)
934 return err;
935
4ebeee2d 936 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
baf27f6e
MH
937 if (err < 0)
938 return err;
939
ec6cef9c
MH
940 /* This function is only called when the controller is actually in
941 * configured state. When the controller is marked as unconfigured,
942 * this initialization procedure is not run.
943 *
944 * It means that it is possible that a controller runs through its
945 * setup phase and then discovers missing settings. If that is the
946 * case, then this function will not be called. It then will only
947 * be called during the config phase.
948 *
949 * So only when in setup phase or config phase, create the debugfs
950 * entries and register the SMP channels.
baf27f6e 951 */
d7a5a11d
MH
952 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
953 !hci_dev_test_flag(hdev, HCI_CONFIG))
baf27f6e
MH
954 return 0;
955
60c5f5fb
MH
956 hci_debugfs_create_common(hdev);
957
71c3b60e 958 if (lmp_bredr_capable(hdev))
60c5f5fb 959 hci_debugfs_create_bredr(hdev);
2bfa3531 960
162a3bac 961 if (lmp_le_capable(hdev))
60c5f5fb 962 hci_debugfs_create_le(hdev);
e7b8fc92 963
baf27f6e 964 return 0;
2177bab5
JH
965}
966
a1d01db1 967static int hci_init0_req(struct hci_request *req, unsigned long opt)
0ebca7d6
MH
968{
969 struct hci_dev *hdev = req->hdev;
970
971 BT_DBG("%s %ld", hdev->name, opt);
972
973 /* Reset */
974 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
975 hci_reset_req(req, 0);
976
977 /* Read Local Version */
978 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
979
980 /* Read BD Address */
981 if (hdev->set_bdaddr)
982 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
a1d01db1
JH
983
984 return 0;
0ebca7d6
MH
985}
986
987static int __hci_unconf_init(struct hci_dev *hdev)
988{
989 int err;
990
cc78b44b
MH
991 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
992 return 0;
993
4ebeee2d 994 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
0ebca7d6
MH
995 if (err < 0)
996 return err;
997
f640ee98
MH
998 if (hci_dev_test_flag(hdev, HCI_SETUP))
999 hci_debugfs_create_basic(hdev);
1000
0ebca7d6
MH
1001 return 0;
1002}
1003
a1d01db1 1004static int hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1005{
1006 __u8 scan = opt;
1007
42c6b129 1008 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1009
1010 /* Inquiry and Page scans */
42c6b129 1011 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
a1d01db1 1012 return 0;
1da177e4
LT
1013}
1014
a1d01db1 1015static int hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1016{
1017 __u8 auth = opt;
1018
42c6b129 1019 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1020
1021 /* Authentication */
42c6b129 1022 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
a1d01db1 1023 return 0;
1da177e4
LT
1024}
1025
a1d01db1 1026static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1027{
1028 __u8 encrypt = opt;
1029
42c6b129 1030 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1031
e4e8e37c 1032 /* Encryption */
42c6b129 1033 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
a1d01db1 1034 return 0;
1da177e4
LT
1035}
1036
a1d01db1 1037static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1038{
1039 __le16 policy = cpu_to_le16(opt);
1040
42c6b129 1041 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1042
1043 /* Default link policy */
42c6b129 1044 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
a1d01db1 1045 return 0;
e4e8e37c
MH
1046}
1047
8e87d142 1048/* Get HCI device by index.
1da177e4
LT
1049 * Device is held on return. */
1050struct hci_dev *hci_dev_get(int index)
1051{
8035ded4 1052 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1053
1054 BT_DBG("%d", index);
1055
1056 if (index < 0)
1057 return NULL;
1058
1059 read_lock(&hci_dev_list_lock);
8035ded4 1060 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1061 if (d->id == index) {
1062 hdev = hci_dev_hold(d);
1063 break;
1064 }
1065 }
1066 read_unlock(&hci_dev_list_lock);
1067 return hdev;
1068}
1da177e4
LT
1069
1070/* ---- Inquiry support ---- */
ff9ef578 1071
30dc78e1
JH
1072bool hci_discovery_active(struct hci_dev *hdev)
1073{
1074 struct discovery_state *discov = &hdev->discovery;
1075
6fbe195d 1076 switch (discov->state) {
343f935b 1077 case DISCOVERY_FINDING:
6fbe195d 1078 case DISCOVERY_RESOLVING:
30dc78e1
JH
1079 return true;
1080
6fbe195d
AG
1081 default:
1082 return false;
1083 }
30dc78e1
JH
1084}
1085
ff9ef578
JH
1086void hci_discovery_set_state(struct hci_dev *hdev, int state)
1087{
bb3e0a33
JH
1088 int old_state = hdev->discovery.state;
1089
ff9ef578
JH
1090 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1091
bb3e0a33 1092 if (old_state == state)
ff9ef578
JH
1093 return;
1094
bb3e0a33
JH
1095 hdev->discovery.state = state;
1096
ff9ef578
JH
1097 switch (state) {
1098 case DISCOVERY_STOPPED:
c54c3860
AG
1099 hci_update_background_scan(hdev);
1100
bb3e0a33 1101 if (old_state != DISCOVERY_STARTING)
7b99b659 1102 mgmt_discovering(hdev, 0);
ff9ef578
JH
1103 break;
1104 case DISCOVERY_STARTING:
1105 break;
343f935b 1106 case DISCOVERY_FINDING:
ff9ef578
JH
1107 mgmt_discovering(hdev, 1);
1108 break;
30dc78e1
JH
1109 case DISCOVERY_RESOLVING:
1110 break;
ff9ef578
JH
1111 case DISCOVERY_STOPPING:
1112 break;
1113 }
ff9ef578
JH
1114}
1115
1f9b9a5d 1116void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1117{
30883512 1118 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1119 struct inquiry_entry *p, *n;
1da177e4 1120
561aafbc
JH
1121 list_for_each_entry_safe(p, n, &cache->all, all) {
1122 list_del(&p->all);
b57c1a56 1123 kfree(p);
1da177e4 1124 }
561aafbc
JH
1125
1126 INIT_LIST_HEAD(&cache->unknown);
1127 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1128}
1129
a8c5fb1a
GP
1130struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1131 bdaddr_t *bdaddr)
1da177e4 1132{
30883512 1133 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1134 struct inquiry_entry *e;
1135
6ed93dc6 1136 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1137
561aafbc
JH
1138 list_for_each_entry(e, &cache->all, all) {
1139 if (!bacmp(&e->data.bdaddr, bdaddr))
1140 return e;
1141 }
1142
1143 return NULL;
1144}
1145
1146struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1147 bdaddr_t *bdaddr)
561aafbc 1148{
30883512 1149 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1150 struct inquiry_entry *e;
1151
6ed93dc6 1152 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1153
1154 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1155 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1156 return e;
1157 }
1158
1159 return NULL;
1da177e4
LT
1160}
1161
30dc78e1 1162struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1163 bdaddr_t *bdaddr,
1164 int state)
30dc78e1
JH
1165{
1166 struct discovery_state *cache = &hdev->discovery;
1167 struct inquiry_entry *e;
1168
6ed93dc6 1169 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1170
1171 list_for_each_entry(e, &cache->resolve, list) {
1172 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1173 return e;
1174 if (!bacmp(&e->data.bdaddr, bdaddr))
1175 return e;
1176 }
1177
1178 return NULL;
1179}
1180
a3d4e20a 1181void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 1182 struct inquiry_entry *ie)
a3d4e20a
JH
1183{
1184 struct discovery_state *cache = &hdev->discovery;
1185 struct list_head *pos = &cache->resolve;
1186 struct inquiry_entry *p;
1187
1188 list_del(&ie->list);
1189
1190 list_for_each_entry(p, &cache->resolve, list) {
1191 if (p->name_state != NAME_PENDING &&
a8c5fb1a 1192 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
1193 break;
1194 pos = &p->list;
1195 }
1196
1197 list_add(&ie->list, pos);
1198}
1199
af58925c
MH
1200u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1201 bool name_known)
1da177e4 1202{
30883512 1203 struct discovery_state *cache = &hdev->discovery;
70f23020 1204 struct inquiry_entry *ie;
af58925c 1205 u32 flags = 0;
1da177e4 1206
6ed93dc6 1207 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 1208
6928a924 1209 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
2b2fec4d 1210
af58925c
MH
1211 if (!data->ssp_mode)
1212 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 1213
70f23020 1214 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 1215 if (ie) {
af58925c
MH
1216 if (!ie->data.ssp_mode)
1217 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 1218
a3d4e20a 1219 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 1220 data->rssi != ie->data.rssi) {
a3d4e20a
JH
1221 ie->data.rssi = data->rssi;
1222 hci_inquiry_cache_update_resolve(hdev, ie);
1223 }
1224
561aafbc 1225 goto update;
a3d4e20a 1226 }
561aafbc
JH
1227
1228 /* Entry not in the cache. Add new one. */
27f70f3e 1229 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
af58925c
MH
1230 if (!ie) {
1231 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1232 goto done;
1233 }
561aafbc
JH
1234
1235 list_add(&ie->all, &cache->all);
1236
1237 if (name_known) {
1238 ie->name_state = NAME_KNOWN;
1239 } else {
1240 ie->name_state = NAME_NOT_KNOWN;
1241 list_add(&ie->list, &cache->unknown);
1242 }
70f23020 1243
561aafbc
JH
1244update:
1245 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 1246 ie->name_state != NAME_PENDING) {
561aafbc
JH
1247 ie->name_state = NAME_KNOWN;
1248 list_del(&ie->list);
1da177e4
LT
1249 }
1250
70f23020
AE
1251 memcpy(&ie->data, data, sizeof(*data));
1252 ie->timestamp = jiffies;
1da177e4 1253 cache->timestamp = jiffies;
3175405b
JH
1254
1255 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 1256 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 1257
af58925c
MH
1258done:
1259 return flags;
1da177e4
LT
1260}
1261
1262static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1263{
30883512 1264 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1265 struct inquiry_info *info = (struct inquiry_info *) buf;
1266 struct inquiry_entry *e;
1267 int copied = 0;
1268
561aafbc 1269 list_for_each_entry(e, &cache->all, all) {
1da177e4 1270 struct inquiry_data *data = &e->data;
b57c1a56
JH
1271
1272 if (copied >= num)
1273 break;
1274
1da177e4
LT
1275 bacpy(&info->bdaddr, &data->bdaddr);
1276 info->pscan_rep_mode = data->pscan_rep_mode;
1277 info->pscan_period_mode = data->pscan_period_mode;
1278 info->pscan_mode = data->pscan_mode;
1279 memcpy(info->dev_class, data->dev_class, 3);
1280 info->clock_offset = data->clock_offset;
b57c1a56 1281
1da177e4 1282 info++;
b57c1a56 1283 copied++;
1da177e4
LT
1284 }
1285
1286 BT_DBG("cache %p, copied %d", cache, copied);
1287 return copied;
1288}
1289
a1d01db1 1290static int hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1291{
1292 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 1293 struct hci_dev *hdev = req->hdev;
1da177e4
LT
1294 struct hci_cp_inquiry cp;
1295
1296 BT_DBG("%s", hdev->name);
1297
1298 if (test_bit(HCI_INQUIRY, &hdev->flags))
a1d01db1 1299 return 0;
1da177e4
LT
1300
1301 /* Start Inquiry */
1302 memcpy(&cp.lap, &ir->lap, 3);
1303 cp.length = ir->length;
1304 cp.num_rsp = ir->num_rsp;
42c6b129 1305 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
a1d01db1
JH
1306
1307 return 0;
1da177e4
LT
1308}
1309
1310int hci_inquiry(void __user *arg)
1311{
1312 __u8 __user *ptr = arg;
1313 struct hci_inquiry_req ir;
1314 struct hci_dev *hdev;
1315 int err = 0, do_inquiry = 0, max_rsp;
1316 long timeo;
1317 __u8 *buf;
1318
1319 if (copy_from_user(&ir, ptr, sizeof(ir)))
1320 return -EFAULT;
1321
5a08ecce
AE
1322 hdev = hci_dev_get(ir.dev_id);
1323 if (!hdev)
1da177e4
LT
1324 return -ENODEV;
1325
d7a5a11d 1326 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1327 err = -EBUSY;
1328 goto done;
1329 }
1330
d7a5a11d 1331 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1332 err = -EOPNOTSUPP;
1333 goto done;
1334 }
1335
ca8bee5d 1336 if (hdev->dev_type != HCI_PRIMARY) {
5b69bef5
MH
1337 err = -EOPNOTSUPP;
1338 goto done;
1339 }
1340
d7a5a11d 1341 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
1342 err = -EOPNOTSUPP;
1343 goto done;
1344 }
1345
09fd0de5 1346 hci_dev_lock(hdev);
8e87d142 1347 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1348 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1349 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1350 do_inquiry = 1;
1351 }
09fd0de5 1352 hci_dev_unlock(hdev);
1da177e4 1353
04837f64 1354 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1355
1356 if (do_inquiry) {
01178cd4 1357 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
4ebeee2d 1358 timeo, NULL);
70f23020
AE
1359 if (err < 0)
1360 goto done;
3e13fa1e
AG
1361
1362 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1363 * cleared). If it is interrupted by a signal, return -EINTR.
1364 */
74316201 1365 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
28a758c8
PB
1366 TASK_INTERRUPTIBLE)) {
1367 err = -EINTR;
1368 goto done;
1369 }
70f23020 1370 }
1da177e4 1371
8fc9ced3
GP
1372 /* for unlimited number of responses we will use buffer with
1373 * 255 entries
1374 */
1da177e4
LT
1375 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1376
1377 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1378 * copy it to the user space.
1379 */
6da2ec56 1380 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
70f23020 1381 if (!buf) {
1da177e4
LT
1382 err = -ENOMEM;
1383 goto done;
1384 }
1385
09fd0de5 1386 hci_dev_lock(hdev);
1da177e4 1387 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1388 hci_dev_unlock(hdev);
1da177e4
LT
1389
1390 BT_DBG("num_rsp %d", ir.num_rsp);
1391
1392 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1393 ptr += sizeof(ir);
1394 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1395 ir.num_rsp))
1da177e4 1396 err = -EFAULT;
8e87d142 1397 } else
1da177e4
LT
1398 err = -EFAULT;
1399
1400 kfree(buf);
1401
1402done:
1403 hci_dev_put(hdev);
1404 return err;
1405}
1406
7a0e5b15
MK
1407/**
1408 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1409 * (BD_ADDR) for a HCI device from
1410 * a firmware node property.
1411 * @hdev: The HCI device
1412 *
1413 * Search the firmware node for 'local-bd-address'.
1414 *
1415 * All-zero BD addresses are rejected, because those could be properties
1416 * that exist in the firmware tables, but were not updated by the firmware. For
1417 * example, the DTS could define 'local-bd-address', with zero BD addresses.
1418 */
1419static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1420{
1421 struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1422 bdaddr_t ba;
1423 int ret;
1424
1425 ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1426 (u8 *)&ba, sizeof(ba));
1427 if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1428 return;
1429
1430 bacpy(&hdev->public_addr, &ba);
1431}
1432
cbed0ca1 1433static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 1434{
1da177e4
LT
1435 int ret = 0;
1436
1da177e4
LT
1437 BT_DBG("%s %p", hdev->name, hdev);
1438
b504430c 1439 hci_req_sync_lock(hdev);
1da177e4 1440
d7a5a11d 1441 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
94324962
JH
1442 ret = -ENODEV;
1443 goto done;
1444 }
1445
d7a5a11d
MH
1446 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1447 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
a5c8f270
MH
1448 /* Check for rfkill but allow the HCI setup stage to
1449 * proceed (which in itself doesn't cause any RF activity).
1450 */
d7a5a11d 1451 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
a5c8f270
MH
1452 ret = -ERFKILL;
1453 goto done;
1454 }
1455
1456 /* Check for valid public address or a configured static
91641b79 1457 * random address, but let the HCI setup proceed to
a5c8f270
MH
1458 * be able to determine if there is a public address
1459 * or not.
1460 *
c6beca0e
MH
1461 * In case of user channel usage, it is not important
1462 * if a public address or static random address is
1463 * available.
1464 *
a5c8f270
MH
1465 * This check is only valid for BR/EDR controllers
1466 * since AMP controllers do not have an address.
1467 */
d7a5a11d 1468 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
ca8bee5d 1469 hdev->dev_type == HCI_PRIMARY &&
a5c8f270
MH
1470 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1471 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1472 ret = -EADDRNOTAVAIL;
1473 goto done;
1474 }
611b30f7
MH
1475 }
1476
1da177e4
LT
1477 if (test_bit(HCI_UP, &hdev->flags)) {
1478 ret = -EALREADY;
1479 goto done;
1480 }
1481
1da177e4
LT
1482 if (hdev->open(hdev)) {
1483 ret = -EIO;
1484 goto done;
1485 }
1486
e9ca8bf1 1487 set_bit(HCI_RUNNING, &hdev->flags);
05fcd4c4 1488 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
4a3f95b7 1489
f41c70c4
MH
1490 atomic_set(&hdev->cmd_cnt, 1);
1491 set_bit(HCI_INIT, &hdev->flags);
1492
740011cf
SW
1493 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1494 test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
7fdf6c6a
MH
1495 bool invalid_bdaddr;
1496
e131d74a
MH
1497 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1498
af202f84
MH
1499 if (hdev->setup)
1500 ret = hdev->setup(hdev);
f41c70c4 1501
7fdf6c6a
MH
1502 /* The transport driver can set the quirk to mark the
1503 * BD_ADDR invalid before creating the HCI device or in
1504 * its setup callback.
1505 */
1506 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1507 &hdev->quirks);
1508
7a0e5b15
MK
1509 if (ret)
1510 goto setup_failed;
1511
1512 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1513 if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1514 hci_dev_get_bd_addr_from_property(hdev);
1515
1516 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
7fdf6c6a 1517 hdev->set_bdaddr) {
7a0e5b15
MK
1518 ret = hdev->set_bdaddr(hdev,
1519 &hdev->public_addr);
7fdf6c6a
MH
1520
1521 /* If setting of the BD_ADDR from the device
1522 * property succeeds, then treat the address
1523 * as valid even if the invalid BD_ADDR
1524 * quirk indicates otherwise.
1525 */
1526 if (!ret)
1527 invalid_bdaddr = false;
1528 }
7a0e5b15
MK
1529 }
1530
1531setup_failed:
af202f84
MH
1532 /* The transport driver can set these quirks before
1533 * creating the HCI device or in its setup callback.
1534 *
7fdf6c6a
MH
1535 * For the invalid BD_ADDR quirk it is possible that
1536 * it becomes a valid address if the bootloader does
1537 * provide it (see above).
1538 *
af202f84
MH
1539 * In case any of them is set, the controller has to
1540 * start up as unconfigured.
1541 */
eb1904f4 1542 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
7fdf6c6a 1543 invalid_bdaddr)
a1536da2 1544 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
f41c70c4 1545
0ebca7d6
MH
1546 /* For an unconfigured controller it is required to
1547 * read at least the version information provided by
1548 * the Read Local Version Information command.
1549 *
1550 * If the set_bdaddr driver callback is provided, then
1551 * also the original Bluetooth public device address
1552 * will be read using the Read BD Address command.
1553 */
d7a5a11d 1554 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
0ebca7d6 1555 ret = __hci_unconf_init(hdev);
89bc22d2
MH
1556 }
1557
d7a5a11d 1558 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
9713c17b
MH
1559 /* If public address change is configured, ensure that
1560 * the address gets programmed. If the driver does not
1561 * support changing the public address, fail the power
1562 * on procedure.
1563 */
1564 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1565 hdev->set_bdaddr)
24c457e2
MH
1566 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1567 else
1568 ret = -EADDRNOTAVAIL;
1569 }
1570
f41c70c4 1571 if (!ret) {
d7a5a11d 1572 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
98a63aaf 1573 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
f41c70c4 1574 ret = __hci_init(hdev);
98a63aaf
MH
1575 if (!ret && hdev->post_init)
1576 ret = hdev->post_init(hdev);
1577 }
1da177e4
LT
1578 }
1579
7e995b9e
MH
1580 /* If the HCI Reset command is clearing all diagnostic settings,
1581 * then they need to be reprogrammed after the init procedure
1582 * completed.
1583 */
1584 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
b56c7b25 1585 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
7e995b9e
MH
1586 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1587 ret = hdev->set_diag(hdev, true);
1588
145373cb 1589 msft_do_open(hdev);
f67743f9 1590 aosp_do_open(hdev);
145373cb 1591
f41c70c4
MH
1592 clear_bit(HCI_INIT, &hdev->flags);
1593
1da177e4
LT
1594 if (!ret) {
1595 hci_dev_hold(hdev);
a1536da2 1596 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
a73c046a 1597 hci_adv_instances_set_rpa_expired(hdev, true);
1da177e4 1598 set_bit(HCI_UP, &hdev->flags);
05fcd4c4 1599 hci_sock_dev_event(hdev, HCI_DEV_UP);
6d5d2ee6 1600 hci_leds_update_powered(hdev, true);
d7a5a11d
MH
1601 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1602 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1603 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1604 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
2ff13894 1605 hci_dev_test_flag(hdev, HCI_MGMT) &&
ca8bee5d 1606 hdev->dev_type == HCI_PRIMARY) {
2ff13894
JH
1607 ret = __hci_req_hci_power_on(hdev);
1608 mgmt_power_on(hdev, ret);
56e5cb86 1609 }
8e87d142 1610 } else {
1da177e4 1611 /* Init failed, cleanup */
3eff45ea 1612 flush_work(&hdev->tx_work);
6a137cae
LM
1613
1614 /* Since hci_rx_work() is possible to awake new cmd_work
1615 * it should be flushed first to avoid unexpected call of
1616 * hci_cmd_work()
1617 */
b78752cc 1618 flush_work(&hdev->rx_work);
6a137cae 1619 flush_work(&hdev->cmd_work);
1da177e4
LT
1620
1621 skb_queue_purge(&hdev->cmd_q);
1622 skb_queue_purge(&hdev->rx_q);
1623
1624 if (hdev->flush)
1625 hdev->flush(hdev);
1626
1627 if (hdev->sent_cmd) {
1628 kfree_skb(hdev->sent_cmd);
1629 hdev->sent_cmd = NULL;
1630 }
1631
e9ca8bf1 1632 clear_bit(HCI_RUNNING, &hdev->flags);
05fcd4c4 1633 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
4a3f95b7 1634
1da177e4 1635 hdev->close(hdev);
fee746b0 1636 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
1637 }
1638
1639done:
b504430c 1640 hci_req_sync_unlock(hdev);
1da177e4
LT
1641 return ret;
1642}
1643
cbed0ca1
JH
1644/* ---- HCI ioctl helpers ---- */
1645
1646int hci_dev_open(__u16 dev)
1647{
1648 struct hci_dev *hdev;
1649 int err;
1650
1651 hdev = hci_dev_get(dev);
1652 if (!hdev)
1653 return -ENODEV;
1654
4a964404 1655 /* Devices that are marked as unconfigured can only be powered
fee746b0
MH
1656 * up as user channel. Trying to bring them up as normal devices
1657 * will result into a failure. Only user channel operation is
1658 * possible.
1659 *
1660 * When this function is called for a user channel, the flag
1661 * HCI_USER_CHANNEL will be set first before attempting to
1662 * open the device.
1663 */
d7a5a11d
MH
1664 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1665 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
fee746b0
MH
1666 err = -EOPNOTSUPP;
1667 goto done;
1668 }
1669
e1d08f40
JH
1670 /* We need to ensure that no other power on/off work is pending
1671 * before proceeding to call hci_dev_do_open. This is
1672 * particularly important if the setup procedure has not yet
1673 * completed.
1674 */
a69d8927 1675 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
e1d08f40
JH
1676 cancel_delayed_work(&hdev->power_off);
1677
a5c8f270
MH
1678 /* After this call it is guaranteed that the setup procedure
1679 * has finished. This means that error conditions like RFKILL
1680 * or no valid public or static random address apply.
1681 */
e1d08f40
JH
1682 flush_workqueue(hdev->req_workqueue);
1683
12aa4f0a 1684 /* For controllers not using the management interface and that
b6ae8457 1685 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
12aa4f0a
MH
1686 * so that pairing works for them. Once the management interface
1687 * is in use this bit will be cleared again and userspace has
1688 * to explicitly enable it.
1689 */
d7a5a11d
MH
1690 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1691 !hci_dev_test_flag(hdev, HCI_MGMT))
a1536da2 1692 hci_dev_set_flag(hdev, HCI_BONDABLE);
12aa4f0a 1693
cbed0ca1
JH
1694 err = hci_dev_do_open(hdev);
1695
fee746b0 1696done:
cbed0ca1 1697 hci_dev_put(hdev);
cbed0ca1
JH
1698 return err;
1699}
1700
d7347f3c
JH
1701/* This function requires the caller holds hdev->lock */
1702static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1703{
1704 struct hci_conn_params *p;
1705
f161dd41
JH
1706 list_for_each_entry(p, &hdev->le_conn_params, list) {
1707 if (p->conn) {
1708 hci_conn_drop(p->conn);
f8aaf9b6 1709 hci_conn_put(p->conn);
f161dd41
JH
1710 p->conn = NULL;
1711 }
d7347f3c 1712 list_del_init(&p->action);
f161dd41 1713 }
d7347f3c
JH
1714
1715 BT_DBG("All LE pending actions cleared");
1716}
1717
6b3cc1db 1718int hci_dev_do_close(struct hci_dev *hdev)
1da177e4 1719{
acc649c6
MH
1720 bool auto_off;
1721
1da177e4
LT
1722 BT_DBG("%s %p", hdev->name, hdev);
1723
d24d8144 1724 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
867146a0 1725 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
d24d8144 1726 test_bit(HCI_UP, &hdev->flags)) {
a44fecbd
THJA
1727 /* Execute vendor specific shutdown routine */
1728 if (hdev->shutdown)
1729 hdev->shutdown(hdev);
1730 }
1731
78c04c0b
VCG
1732 cancel_delayed_work(&hdev->power_off);
1733
7df0f73e 1734 hci_request_cancel_all(hdev);
b504430c 1735 hci_req_sync_lock(hdev);
1da177e4
LT
1736
1737 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 1738 cancel_delayed_work_sync(&hdev->cmd_timer);
b504430c 1739 hci_req_sync_unlock(hdev);
1da177e4
LT
1740 return 0;
1741 }
1742
6d5d2ee6
HK
1743 hci_leds_update_powered(hdev, false);
1744
3eff45ea
GP
1745 /* Flush RX and TX works */
1746 flush_work(&hdev->tx_work);
b78752cc 1747 flush_work(&hdev->rx_work);
1da177e4 1748
16ab91ab 1749 if (hdev->discov_timeout > 0) {
16ab91ab 1750 hdev->discov_timeout = 0;
a358dc11
MH
1751 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1752 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
16ab91ab
JH
1753 }
1754
a69d8927 1755 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
7d78525d
JH
1756 cancel_delayed_work(&hdev->service_cache);
1757
a73c046a
JK
1758 if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1759 struct adv_info *adv_instance;
1760
4518bb0f 1761 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 1762
a73c046a
JK
1763 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1764 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1765 }
1766
76727c02
JH
1767 /* Avoid potential lockdep warnings from the *_flush() calls by
1768 * ensuring the workqueue is empty up front.
1769 */
1770 drain_workqueue(hdev->workqueue);
1771
09fd0de5 1772 hci_dev_lock(hdev);
1aeb9c65 1773
8f502f84
JH
1774 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1775
acc649c6
MH
1776 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1777
ca8bee5d 1778 if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
baab7932 1779 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
2ff13894
JH
1780 hci_dev_test_flag(hdev, HCI_MGMT))
1781 __mgmt_power_off(hdev);
1aeb9c65 1782
1f9b9a5d 1783 hci_inquiry_cache_flush(hdev);
d7347f3c 1784 hci_pend_le_actions_clear(hdev);
f161dd41 1785 hci_conn_hash_flush(hdev);
09fd0de5 1786 hci_dev_unlock(hdev);
1da177e4 1787
64dae967
MH
1788 smp_unregister(hdev);
1789
05fcd4c4 1790 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1da177e4 1791
f67743f9 1792 aosp_do_close(hdev);
145373cb
MC
1793 msft_do_close(hdev);
1794
1da177e4
LT
1795 if (hdev->flush)
1796 hdev->flush(hdev);
1797
1798 /* Reset device */
1799 skb_queue_purge(&hdev->cmd_q);
1800 atomic_set(&hdev->cmd_cnt, 1);
acc649c6
MH
1801 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1802 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1da177e4 1803 set_bit(HCI_INIT, &hdev->flags);
4ebeee2d 1804 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1da177e4
LT
1805 clear_bit(HCI_INIT, &hdev->flags);
1806 }
1807
c347b765
GP
1808 /* flush cmd work */
1809 flush_work(&hdev->cmd_work);
1da177e4
LT
1810
1811 /* Drop queues */
1812 skb_queue_purge(&hdev->rx_q);
1813 skb_queue_purge(&hdev->cmd_q);
1814 skb_queue_purge(&hdev->raw_q);
1815
1816 /* Drop last sent command */
1817 if (hdev->sent_cmd) {
65cc2b49 1818 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
1819 kfree_skb(hdev->sent_cmd);
1820 hdev->sent_cmd = NULL;
1821 }
1822
e9ca8bf1 1823 clear_bit(HCI_RUNNING, &hdev->flags);
05fcd4c4 1824 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
4a3f95b7 1825
9952d90e
APS
1826 if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1827 wake_up(&hdev->suspend_wait_q);
1828
1da177e4
LT
1829 /* After this point our queues are empty
1830 * and no tasks are scheduled. */
1831 hdev->close(hdev);
1832
35b973c9 1833 /* Clear flags */
fee746b0 1834 hdev->flags &= BIT(HCI_RAW);
eacb44df 1835 hci_dev_clear_volatile_flags(hdev);
35b973c9 1836
ced5c338 1837 /* Controller radio is available but is currently powered down */
536619e8 1838 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 1839
e59fda8d 1840 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 1841 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 1842 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 1843
b504430c 1844 hci_req_sync_unlock(hdev);
1da177e4
LT
1845
1846 hci_dev_put(hdev);
1847 return 0;
1848}
1849
1850int hci_dev_close(__u16 dev)
1851{
1852 struct hci_dev *hdev;
1853 int err;
1854
70f23020
AE
1855 hdev = hci_dev_get(dev);
1856 if (!hdev)
1da177e4 1857 return -ENODEV;
8ee56540 1858
d7a5a11d 1859 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1860 err = -EBUSY;
1861 goto done;
1862 }
1863
a69d8927 1864 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
8ee56540
MH
1865 cancel_delayed_work(&hdev->power_off);
1866
1da177e4 1867 err = hci_dev_do_close(hdev);
8ee56540 1868
0736cfa8 1869done:
1da177e4
LT
1870 hci_dev_put(hdev);
1871 return err;
1872}
1873
5c912495 1874static int hci_dev_do_reset(struct hci_dev *hdev)
1da177e4 1875{
5c912495 1876 int ret;
1da177e4 1877
5c912495 1878 BT_DBG("%s %p", hdev->name, hdev);
1da177e4 1879
b504430c 1880 hci_req_sync_lock(hdev);
1da177e4 1881
1da177e4
LT
1882 /* Drop queues */
1883 skb_queue_purge(&hdev->rx_q);
1884 skb_queue_purge(&hdev->cmd_q);
1885
76727c02
JH
1886 /* Avoid potential lockdep warnings from the *_flush() calls by
1887 * ensuring the workqueue is empty up front.
1888 */
1889 drain_workqueue(hdev->workqueue);
1890
09fd0de5 1891 hci_dev_lock(hdev);
1f9b9a5d 1892 hci_inquiry_cache_flush(hdev);
1da177e4 1893 hci_conn_hash_flush(hdev);
09fd0de5 1894 hci_dev_unlock(hdev);
1da177e4
LT
1895
1896 if (hdev->flush)
1897 hdev->flush(hdev);
1898
8e87d142 1899 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 1900 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 1901
4ebeee2d 1902 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1da177e4 1903
b504430c 1904 hci_req_sync_unlock(hdev);
1da177e4
LT
1905 return ret;
1906}
1907
5c912495
MH
1908int hci_dev_reset(__u16 dev)
1909{
1910 struct hci_dev *hdev;
1911 int err;
1912
1913 hdev = hci_dev_get(dev);
1914 if (!hdev)
1915 return -ENODEV;
1916
1917 if (!test_bit(HCI_UP, &hdev->flags)) {
1918 err = -ENETDOWN;
1919 goto done;
1920 }
1921
d7a5a11d 1922 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5c912495
MH
1923 err = -EBUSY;
1924 goto done;
1925 }
1926
d7a5a11d 1927 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5c912495
MH
1928 err = -EOPNOTSUPP;
1929 goto done;
1930 }
1931
1932 err = hci_dev_do_reset(hdev);
1933
1934done:
1935 hci_dev_put(hdev);
1936 return err;
1937}
1938
1da177e4
LT
/* ioctl entry point for HCIDEVRESTAT: zero the device's byte/packet
 * statistics counters.  Refused for user-channel or unconfigured devices.
 * Returns 0 or a negative errno.
 */
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}
1964
123abc08
JH
/* Synchronize the HCI_CONNECTABLE/HCI_DISCOVERABLE device flags with a
 * scan-enable value (@scan) that was changed outside of mgmt (via the
 * HCISETSCAN ioctl).  If anything changed and mgmt is in use, re-enable
 * BR/EDR, refresh advertising data and emit a New Settings event.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	/* test-and-set/clear so we only report an actual transition */
	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		/* General and limited discoverable are both off now */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}
2000
1da177e4
LT
/* Handle the legacy HCISET* ioctls (@cmd) with a struct hci_dev_req
 * copied from user space (@arg).  Only allowed on primary, configured
 * BR/EDR-enabled controllers that are not bound to a user channel.
 * Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		/* Skip the mgmt notification when nothing changes */
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high 16 bits, packet count low */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2106
/* ioctl entry point for HCIGETDEVLIST: copy the id/flags of up to
 * dev_num registered controllers to user space.  Devices in auto-off
 * state are reported as down even though the transport is running.
 * Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation to two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2156
/* ioctl entry point for HCIGETDEVINFO: fill a struct hci_dev_info for
 * one controller and copy it to user space.  LE-only controllers report
 * their LE buffer settings in the ACL fields.
 * Returns 0 or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble, device type in the next two bits */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only: expose LE buffers through the ACL fields */
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2209
2210/* ---- Interface to HCI drivers ---- */
2211
611b30f7
MH
/* rfkill set_block callback: mark the controller rfkilled and, unless
 * it is still in setup/config, power it down.  Unblocking only clears
 * the flag; the device is not powered back up automatically here.
 * Returns 0, or -EBUSY when the device is owned by a user channel.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		/* Don't interrupt an ongoing setup/config phase */
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}
2232
/* rfkill operations registered for every HCI controller */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2236
ab81cbf9
JH
/* Workqueue handler that powers on a controller.  Either completes a
 * pending mgmt power-on (when the device is already up in auto-off
 * mode) or opens the device and re-validates conditions that were
 * ignored during setup (rfkill, unconfigured, missing address).
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	/* Device already up via auto-off: just finish the mgmt power-on */
	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2308
/* Delayed-work handler (power_off.work) that closes the controller */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2318
c7741d16
MH
/* Workqueue handler for a Hardware Error event: give the driver a
 * chance to handle the error code, then bounce the device (close and
 * reopen) to recover.  If the close fails the reopen is skipped.
 */
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}
2335
/* Free every UUID registered on @hdev */
void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}
2345
/* Remove and free all stored BR/EDR link keys; frees are deferred via
 * RCU so concurrent lockless readers stay safe.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
2355
/* Remove and free all stored SMP long term keys (RCU-deferred frees) */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2365
970c4e46
JH
/* Remove and free all identity resolving keys (RCU-deferred frees) */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2375
600a8749
AM
/* Remove and free all blocked-key entries (RCU-deferred frees) */
void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b;

	list_for_each_entry(b, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}
2385
/* Check whether a 16-byte key value @val of kind @type is on the
 * device's blocked-key list.  Walks the list under RCU.
 * Returns true if the key must not be used.
 */
bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}
2402
55ed8ca1
JH
/* Look up the BR/EDR link key stored for @bdaddr.  Returns NULL when no
 * key exists or the stored key is on the blocked-key list.
 * NOTE(review): the RCU read section is dropped before the key is
 * returned/inspected; callers presumably rely on hdev locking to keep
 * the entry alive — confirm against call sites.
 */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2428
/* Decide whether a newly created link key of @key_type should be stored
 * persistently, based on the key type, the previous key type and the
 * authentication requirements of both sides of @conn (may be NULL for
 * the security mode 3 case).
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2468
e804d25d 2469static u8 ltk_role(u8 type)
98a0b845 2470{
e804d25d
JH
2471 if (type == SMP_LTK)
2472 return HCI_ROLE_MASTER;
98a0b845 2473
e804d25d 2474 return HCI_ROLE_SLAVE;
98a0b845
JH
2475}
2476
f3a73d97
JH
/* Look up a long term key for @bdaddr/@addr_type usable in @role.
 * SC-generated LTKs match either role; legacy LTKs must match exactly.
 * Returns NULL when no key matches or the key is blocked.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
75d262c2 2505
970c4e46
JH
/* Find the IRK that resolves the resolvable private address @rpa.
 * First tries a cached exact RPA match, then cryptographically tests
 * each IRK via smp_irk_matches() and caches the RPA on success.
 * Returns NULL when nothing matches or the matching IRK is blocked.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			/* Cache the resolved RPA for the fast path above */
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
2539
/* Find the IRK stored for the identity address @bdaddr/@addr_type.
 * Random identity addresses must be static random (top two bits set).
 * Returns NULL when no IRK exists or the matching IRK is blocked.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:

	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
2572
/* Store (or update) the BR/EDR link key for @bdaddr.  If @persistent is
 * non-NULL it is set to whether the key should be stored permanently
 * (see hci_persistent_key()).  @conn may be NULL.
 * Returns the stored key, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed-combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2619
/* Store (or update) a long term key for @bdaddr/@addr_type.  The role
 * used for the lookup is derived from @type via ltk_role().
 * Returns the stored key, or NULL on allocation failure.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}
2648
ca9142b8
JH
/* Store (or update) the identity resolving key for identity address
 * @bdaddr/@addr_type, recording @rpa as its last resolved RPA.
 * Returns the stored IRK, or NULL on allocation failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
2671
55ed8ca1
JH
/* Delete the stored BR/EDR link key for @bdaddr (RCU-deferred free).
 * Returns 0, or -ENOENT when no key is stored.
 */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}
2687
/* Delete all long term keys stored for @bdaddr/@bdaddr_type
 * (RCU-deferred frees).  Returns 0 if anything was removed, -ENOENT
 * otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
2706
a7ec7338
JH
/* Delete the identity resolving key(s) stored for @bdaddr/@addr_type
 * (RCU-deferred frees).
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2721
55e76b38
JH
/* Report whether a pairing key is stored for @bdaddr of mgmt address
 * type @type.  BR/EDR checks the link-key list; LE first resolves the
 * address through any matching IRK and then checks for an LTK.
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* Resolve to the identity address if we know the IRK */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
2757
/* HCI command timer function: fires when the controller failed to
 * answer a command in time.  Logs the stalled opcode (if known), lets
 * the driver react via its cmd_timeout hook, then restores a command
 * credit and kicks the command work so the queue can make progress.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2779
/* Look up stored remote out-of-band pairing data for
 * @bdaddr/@bdaddr_type.  Returns NULL when none is stored.
 */
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}
2795
6928a924
JH
/* Delete the stored remote OOB data for @bdaddr/@bdaddr_type.
 * Returns 0, or -ENOENT when nothing is stored.
 */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}
2812
/* Free every stored remote OOB data entry on @hdev */
void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}
2822
/* Store (or update) remote OOB pairing data for @bdaddr/@bdaddr_type.
 * Either the P-192 pair (@hash192/@rand192), the P-256 pair
 * (@hash256/@rand256), or both may be provided; data->present is set to
 * a bitmask (0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both,
 * 0x00 = none).  Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2868
d2609b34
FG
/* This function requires the caller holds hdev->lock.
 * Returns the advertising instance with the given id, or NULL.
 */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}
2881
/* This function requires the caller holds hdev->lock.
 * Returns the advertising instance following @instance, wrapping around
 * to the first instance after the last one; NULL when @instance does
 * not exist.
 */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}
2898
2899/* This function requires the caller holds hdev->lock */
2900int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2901{
2902 struct adv_info *adv_instance;
2903
2904 adv_instance = hci_find_adv_instance(hdev, instance);
2905 if (!adv_instance)
2906 return -ENOENT;
2907
2908 BT_DBG("%s removing %dMR", hdev->name, instance);
2909
cab054ab
JH
2910 if (hdev->cur_adv_instance == instance) {
2911 if (hdev->adv_instance_timeout) {
2912 cancel_delayed_work(&hdev->adv_instance_expire);
2913 hdev->adv_instance_timeout = 0;
2914 }
2915 hdev->cur_adv_instance = 0x00;
5d900e46
FG
2916 }
2917
a73c046a
JK
2918 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2919
d2609b34
FG
2920 list_del(&adv_instance->list);
2921 kfree(adv_instance);
2922
2923 hdev->adv_instance_cnt--;
2924
2925 return 0;
2926}
2927
a73c046a
JK
/* Set the rpa_expired flag on every advertising instance of @hdev */
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}
2935
d2609b34
FG
/* This function requires the caller holds hdev->lock.
 * Remove and free every advertising instance, cancelling the pending
 * expiry work and each instance's RPA-expired work first.
 */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}
2955
a73c046a
JK
/* Delayed-work handler that marks an advertising instance's RPA as
 * expired so it gets regenerated.
 */
static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}
2965
d2609b34
FG
2966/* This function requires the caller holds hdev->lock */
2967int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2968 u16 adv_data_len, u8 *adv_data,
2969 u16 scan_rsp_len, u8 *scan_rsp_data,
9bf9f4b6
DW
2970 u16 timeout, u16 duration, s8 tx_power,
2971 u32 min_interval, u32 max_interval)
d2609b34
FG
2972{
2973 struct adv_info *adv_instance;
2974
2975 adv_instance = hci_find_adv_instance(hdev, instance);
2976 if (adv_instance) {
2977 memset(adv_instance->adv_data, 0,
2978 sizeof(adv_instance->adv_data));
2979 memset(adv_instance->scan_rsp_data, 0,
2980 sizeof(adv_instance->scan_rsp_data));
2981 } else {
1d0fac2c 2982 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
87597482 2983 instance < 1 || instance > hdev->le_num_of_adv_sets)
d2609b34
FG
2984 return -EOVERFLOW;
2985
39ecfad6 2986 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
d2609b34
FG
2987 if (!adv_instance)
2988 return -ENOMEM;
2989
fffd38bc 2990 adv_instance->pending = true;
d2609b34
FG
2991 adv_instance->instance = instance;
2992 list_add(&adv_instance->list, &hdev->adv_instances);
2993 hdev->adv_instance_cnt++;
2994 }
2995
2996 adv_instance->flags = flags;
2997 adv_instance->adv_data_len = adv_data_len;
2998 adv_instance->scan_rsp_len = scan_rsp_len;
9bf9f4b6
DW
2999 adv_instance->min_interval = min_interval;
3000 adv_instance->max_interval = max_interval;
3001 adv_instance->tx_power = tx_power;
d2609b34
FG
3002
3003 if (adv_data_len)
3004 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3005
3006 if (scan_rsp_len)
3007 memcpy(adv_instance->scan_rsp_data,
3008 scan_rsp_data, scan_rsp_len);
3009
3010 adv_instance->timeout = timeout;
5d900e46 3011 adv_instance->remaining_time = timeout;
d2609b34
FG
3012
3013 if (duration == 0)
10873f99 3014 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
d2609b34
FG
3015 else
3016 adv_instance->duration = duration;
3017
a73c046a
JK
3018 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3019 adv_instance_rpa_expired);
3020
d2609b34
FG
3021 BT_DBG("%s for %dMR", hdev->name, instance);
3022
3023 return 0;
3024}
3025
31aab5c2
DW
3026/* This function requires the caller holds hdev->lock */
3027int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3028 u16 adv_data_len, u8 *adv_data,
3029 u16 scan_rsp_len, u8 *scan_rsp_data)
3030{
3031 struct adv_info *adv_instance;
3032
3033 adv_instance = hci_find_adv_instance(hdev, instance);
3034
3035 /* If advertisement doesn't exist, we can't modify its data */
3036 if (!adv_instance)
3037 return -ENOENT;
3038
3039 if (adv_data_len) {
3040 memset(adv_instance->adv_data, 0,
3041 sizeof(adv_instance->adv_data));
3042 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3043 adv_instance->adv_data_len = adv_data_len;
3044 }
3045
3046 if (scan_rsp_len) {
3047 memset(adv_instance->scan_rsp_data, 0,
3048 sizeof(adv_instance->scan_rsp_data));
3049 memcpy(adv_instance->scan_rsp_data,
3050 scan_rsp_data, scan_rsp_len);
3051 adv_instance->scan_rsp_len = scan_rsp_len;
3052 }
3053
3054 return 0;
3055}
3056
e5e1e7fd
MC
3057/* This function requires the caller holds hdev->lock */
3058void hci_adv_monitors_clear(struct hci_dev *hdev)
3059{
b139553d
MC
3060 struct adv_monitor *monitor;
3061 int handle;
3062
3063 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
66bd095a 3064 hci_free_adv_monitor(hdev, monitor);
b139553d 3065
e5e1e7fd
MC
3066 idr_destroy(&hdev->adv_monitors_idr);
3067}
3068
66bd095a
AP
3069/* Frees the monitor structure and do some bookkeepings.
3070 * This function requires the caller holds hdev->lock.
3071 */
3072void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
b139553d
MC
3073{
3074 struct adv_pattern *pattern;
3075 struct adv_pattern *tmp;
3076
3077 if (!monitor)
3078 return;
3079
66bd095a
AP
3080 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3081 list_del(&pattern->list);
b139553d 3082 kfree(pattern);
66bd095a
AP
3083 }
3084
3085 if (monitor->handle)
3086 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3087
3088 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3089 hdev->adv_monitors_cnt--;
3090 mgmt_adv_monitor_removed(hdev, monitor->handle);
3091 }
b139553d
MC
3092
3093 kfree(monitor);
3094}
3095
a2a4dedf
AP
3096int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3097{
3098 return mgmt_add_adv_patterns_monitor_complete(hdev, status);
3099}
3100
66bd095a
AP
3101int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3102{
3103 return mgmt_remove_adv_monitor_complete(hdev, status);
3104}
3105
a2a4dedf
AP
3106/* Assigns handle to a monitor, and if offloading is supported and power is on,
3107 * also attempts to forward the request to the controller.
3108 * Returns true if request is forwarded (result is pending), false otherwise.
3109 * This function requires the caller holds hdev->lock.
3110 */
3111bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3112 int *err)
b139553d
MC
3113{
3114 int min, max, handle;
3115
a2a4dedf
AP
3116 *err = 0;
3117
3118 if (!monitor) {
3119 *err = -EINVAL;
3120 return false;
3121 }
b139553d
MC
3122
3123 min = HCI_MIN_ADV_MONITOR_HANDLE;
3124 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3125 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3126 GFP_KERNEL);
a2a4dedf
AP
3127 if (handle < 0) {
3128 *err = handle;
3129 return false;
3130 }
b139553d 3131
b139553d 3132 monitor->handle = handle;
8208f5a9 3133
a2a4dedf
AP
3134 if (!hdev_is_powered(hdev))
3135 return false;
8208f5a9 3136
a2a4dedf
AP
3137 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3138 case HCI_ADV_MONITOR_EXT_NONE:
3139 hci_update_background_scan(hdev);
3140 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
3141 /* Message was not forwarded to controller - not an error */
3142 return false;
3143 case HCI_ADV_MONITOR_EXT_MSFT:
3144 *err = msft_add_monitor_pattern(hdev, monitor);
3145 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
3146 *err);
3147 break;
3148 }
3149
3150 return (*err == 0);
b139553d
MC
3151}
3152
66bd095a
AP
3153/* Attempts to tell the controller and free the monitor. If somehow the
3154 * controller doesn't have a corresponding handle, remove anyway.
3155 * Returns true if request is forwarded (result is pending), false otherwise.
3156 * This function requires the caller holds hdev->lock.
3157 */
3158static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3159 struct adv_monitor *monitor,
3160 u16 handle, int *err)
bd2fbc6c 3161{
66bd095a 3162 *err = 0;
bd2fbc6c 3163
66bd095a
AP
3164 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3165 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3166 goto free_monitor;
3167 case HCI_ADV_MONITOR_EXT_MSFT:
3168 *err = msft_remove_monitor(hdev, monitor, handle);
3169 break;
3170 }
bd2fbc6c 3171
66bd095a
AP
3172 /* In case no matching handle registered, just free the monitor */
3173 if (*err == -ENOENT)
3174 goto free_monitor;
3175
3176 return (*err == 0);
3177
3178free_monitor:
3179 if (*err == -ENOENT)
3180 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3181 monitor->handle);
3182 hci_free_adv_monitor(hdev, monitor);
3183
3184 *err = 0;
3185 return false;
bd2fbc6c
MC
3186}
3187
66bd095a
AP
3188/* Returns true if request is forwarded (result is pending), false otherwise.
3189 * This function requires the caller holds hdev->lock.
3190 */
3191bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3192{
3193 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3194 bool pending;
3195
3196 if (!monitor) {
3197 *err = -EINVAL;
3198 return false;
3199 }
3200
3201 pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3202 if (!*err && !pending)
3203 hci_update_background_scan(hdev);
3204
3205 bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
3206 hdev->name, handle, *err, pending ? "" : "not ");
3207
3208 return pending;
3209}
3210
3211/* Returns true if request is forwarded (result is pending), false otherwise.
3212 * This function requires the caller holds hdev->lock.
3213 */
3214bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
bd2fbc6c
MC
3215{
3216 struct adv_monitor *monitor;
66bd095a
AP
3217 int idr_next_id = 0;
3218 bool pending = false;
3219 bool update = false;
bd2fbc6c 3220
66bd095a
AP
3221 *err = 0;
3222
3223 while (!*err && !pending) {
3224 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
bd2fbc6c 3225 if (!monitor)
66bd095a 3226 break;
bd2fbc6c 3227
66bd095a
AP
3228 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3229
3230 if (!*err && !pending)
3231 update = true;
bd2fbc6c
MC
3232 }
3233
66bd095a
AP
3234 if (update)
3235 hci_update_background_scan(hdev);
8208f5a9 3236
66bd095a
AP
3237 bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
3238 hdev->name, *err, pending ? "" : "not ");
3239
3240 return pending;
bd2fbc6c
MC
3241}
3242
8208f5a9
MC
3243/* This function requires the caller holds hdev->lock */
3244bool hci_is_adv_monitoring(struct hci_dev *hdev)
3245{
3246 return !idr_is_empty(&hdev->adv_monitors_idr);
3247}
3248
a2a4dedf
AP
3249int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3250{
3251 if (msft_monitor_supported(hdev))
3252 return HCI_ADV_MONITOR_EXT_MSFT;
3253
3254 return HCI_ADV_MONITOR_EXT_NONE;
3255}
3256
dcc36c16 3257struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 3258 bdaddr_t *bdaddr, u8 type)
b2a66aad 3259{
8035ded4 3260 struct bdaddr_list *b;
b2a66aad 3261
dcc36c16 3262 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 3263 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3264 return b;
b9ee0a78 3265 }
b2a66aad
AJ
3266
3267 return NULL;
3268}
3269
b950aa88
AN
3270struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3271 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3272 u8 type)
3273{
3274 struct bdaddr_list_with_irk *b;
3275
3276 list_for_each_entry(b, bdaddr_list, list) {
3277 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3278 return b;
3279 }
3280
3281 return NULL;
3282}
3283
8baaa403
APS
3284struct bdaddr_list_with_flags *
3285hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3286 bdaddr_t *bdaddr, u8 type)
3287{
3288 struct bdaddr_list_with_flags *b;
3289
3290 list_for_each_entry(b, bdaddr_list, list) {
3291 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3292 return b;
3293 }
3294
3295 return NULL;
3296}
3297
dcc36c16 3298void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad 3299{
7eb7404f 3300 struct bdaddr_list *b, *n;
b2a66aad 3301
7eb7404f
GT
3302 list_for_each_entry_safe(b, n, bdaddr_list, list) {
3303 list_del(&b->list);
b2a66aad
AJ
3304 kfree(b);
3305 }
b2a66aad
AJ
3306}
3307
dcc36c16 3308int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3309{
3310 struct bdaddr_list *entry;
b2a66aad 3311
b9ee0a78 3312 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3313 return -EBADF;
3314
dcc36c16 3315 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 3316 return -EEXIST;
b2a66aad 3317
27f70f3e 3318 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
3319 if (!entry)
3320 return -ENOMEM;
b2a66aad
AJ
3321
3322 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3323 entry->bdaddr_type = type;
b2a66aad 3324
dcc36c16 3325 list_add(&entry->list, list);
b2a66aad 3326
2a8357f2 3327 return 0;
b2a66aad
AJ
3328}
3329
b950aa88
AN
3330int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3331 u8 type, u8 *peer_irk, u8 *local_irk)
3332{
3333 struct bdaddr_list_with_irk *entry;
3334
3335 if (!bacmp(bdaddr, BDADDR_ANY))
3336 return -EBADF;
3337
3338 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3339 return -EEXIST;
3340
3341 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3342 if (!entry)
3343 return -ENOMEM;
3344
3345 bacpy(&entry->bdaddr, bdaddr);
3346 entry->bdaddr_type = type;
3347
3348 if (peer_irk)
3349 memcpy(entry->peer_irk, peer_irk, 16);
3350
3351 if (local_irk)
3352 memcpy(entry->local_irk, local_irk, 16);
3353
3354 list_add(&entry->list, list);
3355
3356 return 0;
3357}
3358
8baaa403
APS
3359int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3360 u8 type, u32 flags)
3361{
3362 struct bdaddr_list_with_flags *entry;
3363
3364 if (!bacmp(bdaddr, BDADDR_ANY))
3365 return -EBADF;
3366
3367 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3368 return -EEXIST;
3369
3370 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3371 if (!entry)
3372 return -ENOMEM;
3373
3374 bacpy(&entry->bdaddr, bdaddr);
3375 entry->bdaddr_type = type;
3376 entry->current_flags = flags;
3377
3378 list_add(&entry->list, list);
3379
3380 return 0;
3381}
3382
dcc36c16 3383int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3384{
3385 struct bdaddr_list *entry;
b2a66aad 3386
35f7498a 3387 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 3388 hci_bdaddr_list_clear(list);
35f7498a
JH
3389 return 0;
3390 }
b2a66aad 3391
dcc36c16 3392 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
3393 if (!entry)
3394 return -ENOENT;
3395
3396 list_del(&entry->list);
3397 kfree(entry);
3398
3399 return 0;
3400}
3401
b950aa88
AN
3402int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3403 u8 type)
3404{
3405 struct bdaddr_list_with_irk *entry;
3406
3407 if (!bacmp(bdaddr, BDADDR_ANY)) {
3408 hci_bdaddr_list_clear(list);
3409 return 0;
3410 }
3411
3412 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3413 if (!entry)
3414 return -ENOENT;
3415
3416 list_del(&entry->list);
3417 kfree(entry);
3418
3419 return 0;
3420}
3421
8baaa403
APS
3422int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3423 u8 type)
3424{
3425 struct bdaddr_list_with_flags *entry;
3426
3427 if (!bacmp(bdaddr, BDADDR_ANY)) {
3428 hci_bdaddr_list_clear(list);
3429 return 0;
3430 }
3431
3432 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3433 if (!entry)
3434 return -ENOENT;
3435
3436 list_del(&entry->list);
3437 kfree(entry);
3438
3439 return 0;
3440}
3441
15819a70
AG
3442/* This function requires the caller holds hdev->lock */
3443struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3444 bdaddr_t *addr, u8 addr_type)
3445{
3446 struct hci_conn_params *params;
3447
3448 list_for_each_entry(params, &hdev->le_conn_params, list) {
3449 if (bacmp(&params->addr, addr) == 0 &&
3450 params->addr_type == addr_type) {
3451 return params;
3452 }
3453 }
3454
3455 return NULL;
3456}
3457
4b10966f 3458/* This function requires the caller holds hdev->lock */
501f8827
JH
3459struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3460 bdaddr_t *addr, u8 addr_type)
a9b0a04c 3461{
912b42ef 3462 struct hci_conn_params *param;
a9b0a04c 3463
6540351e
MH
3464 switch (addr_type) {
3465 case ADDR_LE_DEV_PUBLIC_RESOLVED:
3466 addr_type = ADDR_LE_DEV_PUBLIC;
3467 break;
3468 case ADDR_LE_DEV_RANDOM_RESOLVED:
3469 addr_type = ADDR_LE_DEV_RANDOM;
3470 break;
3471 }
3472
501f8827 3473 list_for_each_entry(param, list, action) {
912b42ef
JH
3474 if (bacmp(&param->addr, addr) == 0 &&
3475 param->addr_type == addr_type)
3476 return param;
4b10966f
MH
3477 }
3478
3479 return NULL;
a9b0a04c
AG
3480}
3481
15819a70 3482/* This function requires the caller holds hdev->lock */
51d167c0
MH
3483struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3484 bdaddr_t *addr, u8 addr_type)
15819a70
AG
3485{
3486 struct hci_conn_params *params;
3487
3488 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 3489 if (params)
51d167c0 3490 return params;
15819a70
AG
3491
3492 params = kzalloc(sizeof(*params), GFP_KERNEL);
3493 if (!params) {
2064ee33 3494 bt_dev_err(hdev, "out of memory");
51d167c0 3495 return NULL;
15819a70
AG
3496 }
3497
3498 bacpy(&params->addr, addr);
3499 params->addr_type = addr_type;
cef952ce
AG
3500
3501 list_add(&params->list, &hdev->le_conn_params);
93450c75 3502 INIT_LIST_HEAD(&params->action);
cef952ce 3503
bf5b3c8b
MH
3504 params->conn_min_interval = hdev->le_conn_min_interval;
3505 params->conn_max_interval = hdev->le_conn_max_interval;
3506 params->conn_latency = hdev->le_conn_latency;
3507 params->supervision_timeout = hdev->le_supv_timeout;
3508 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3509
3510 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3511
51d167c0 3512 return params;
bf5b3c8b
MH
3513}
3514
f6c63249 3515static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 3516{
f8aaf9b6 3517 if (params->conn) {
f161dd41 3518 hci_conn_drop(params->conn);
f8aaf9b6
JH
3519 hci_conn_put(params->conn);
3520 }
f161dd41 3521
95305baa 3522 list_del(&params->action);
15819a70
AG
3523 list_del(&params->list);
3524 kfree(params);
f6c63249
JH
3525}
3526
3527/* This function requires the caller holds hdev->lock */
3528void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3529{
3530 struct hci_conn_params *params;
3531
3532 params = hci_conn_params_lookup(hdev, addr, addr_type);
3533 if (!params)
3534 return;
3535
3536 hci_conn_params_free(params);
15819a70 3537
95305baa
JH
3538 hci_update_background_scan(hdev);
3539
15819a70
AG
3540 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3541}
3542
3543/* This function requires the caller holds hdev->lock */
55af49a8 3544void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
3545{
3546 struct hci_conn_params *params, *tmp;
3547
3548 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
3549 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3550 continue;
f75113a2 3551
91641b79 3552 /* If trying to establish one time connection to disabled
f75113a2
JP
3553 * device, leave the params, but mark them as just once.
3554 */
3555 if (params->explicit_connect) {
3556 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3557 continue;
3558 }
3559
15819a70
AG
3560 list_del(&params->list);
3561 kfree(params);
3562 }
3563
55af49a8 3564 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
3565}
3566
3567/* This function requires the caller holds hdev->lock */
030e7f81 3568static void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 3569{
15819a70 3570 struct hci_conn_params *params, *tmp;
77a77a30 3571
f6c63249
JH
3572 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3573 hci_conn_params_free(params);
77a77a30 3574
15819a70 3575 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
3576}
3577
a1f4c318
JH
3578/* Copy the Identity Address of the controller.
3579 *
3580 * If the controller has a public BD_ADDR, then by default use that one.
3581 * If this is a LE only controller without a public address, default to
3582 * the static random address.
3583 *
3584 * For debugging purposes it is possible to force controllers with a
3585 * public address to use the static random address instead.
50b5b952
MH
3586 *
3587 * In case BR/EDR has been disabled on a dual-mode controller and
3588 * userspace has configured a static address, then that address
3589 * becomes the identity address instead of the public BR/EDR address.
a1f4c318
JH
3590 */
3591void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3592 u8 *bdaddr_type)
3593{
b7cb93e5 3594 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
50b5b952 3595 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
d7a5a11d 3596 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
50b5b952 3597 bacmp(&hdev->static_addr, BDADDR_ANY))) {
a1f4c318
JH
3598 bacpy(bdaddr, &hdev->static_addr);
3599 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3600 } else {
3601 bacpy(bdaddr, &hdev->bdaddr);
3602 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3603 }
3604}
3605
0e995280
APS
3606static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3607{
3608 int i;
3609
3610 for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3611 clear_bit(i, hdev->suspend_tasks);
3612
3613 wake_up(&hdev->suspend_wait_q);
3614}
3615
9952d90e
APS
3616static int hci_suspend_wait_event(struct hci_dev *hdev)
3617{
3618#define WAKE_COND \
3619 (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
3620 __SUSPEND_NUM_TASKS)
3621
3622 int i;
3623 int ret = wait_event_timeout(hdev->suspend_wait_q,
3624 WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3625
3626 if (ret == 0) {
a9ec8423 3627 bt_dev_err(hdev, "Timed out waiting for suspend events");
9952d90e
APS
3628 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3629 if (test_bit(i, hdev->suspend_tasks))
a9ec8423 3630 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
9952d90e
APS
3631 clear_bit(i, hdev->suspend_tasks);
3632 }
3633
3634 ret = -ETIMEDOUT;
3635 } else {
3636 ret = 0;
3637 }
3638
3639 return ret;
3640}
3641
3642static void hci_prepare_suspend(struct work_struct *work)
3643{
3644 struct hci_dev *hdev =
3645 container_of(work, struct hci_dev, suspend_prepare);
3646
3647 hci_dev_lock(hdev);
3648 hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3649 hci_dev_unlock(hdev);
3650}
3651
8731840a
APS
3652static int hci_change_suspend_state(struct hci_dev *hdev,
3653 enum suspended_state next)
3654{
3655 hdev->suspend_state_next = next;
3656 set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3657 queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3658 return hci_suspend_wait_event(hdev);
3659}
3660
2f20216c
APS
3661static void hci_clear_wake_reason(struct hci_dev *hdev)
3662{
3663 hci_dev_lock(hdev);
3664
3665 hdev->wake_reason = 0;
3666 bacpy(&hdev->wake_addr, BDADDR_ANY);
3667 hdev->wake_addr_type = 0;
3668
3669 hci_dev_unlock(hdev);
3670}
3671
9952d90e
APS
3672static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3673 void *data)
3674{
3675 struct hci_dev *hdev =
3676 container_of(nb, struct hci_dev, suspend_notifier);
3677 int ret = 0;
2f20216c 3678 u8 state = BT_RUNNING;
9952d90e
APS
3679
3680 /* If powering down, wait for completion. */
3681 if (mgmt_powering_down(hdev)) {
3682 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3683 ret = hci_suspend_wait_event(hdev);
3684 if (ret)
3685 goto done;
3686 }
3687
3688 /* Suspend notifier should only act on events when powered. */
5ff20cbe
VS
3689 if (!hdev_is_powered(hdev) ||
3690 hci_dev_test_flag(hdev, HCI_UNREGISTER))
9952d90e
APS
3691 goto done;
3692
3693 if (action == PM_SUSPEND_PREPARE) {
4f40afc6
APS
3694 /* Suspend consists of two actions:
3695 * - First, disconnect everything and make the controller not
3696 * connectable (disabling scanning)
3697 * - Second, program event filter/whitelist and enable scan
3698 */
8731840a 3699 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
2f20216c
APS
3700 if (!ret)
3701 state = BT_SUSPEND_DISCONNECT;
4f40afc6 3702
81dafad5
APS
3703 /* Only configure whitelist if disconnect succeeded and wake
3704 * isn't being prevented.
3705 */
2f20216c 3706 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
8731840a 3707 ret = hci_change_suspend_state(hdev,
0d2c9825 3708 BT_SUSPEND_CONFIGURE_WAKE);
2f20216c
APS
3709 if (!ret)
3710 state = BT_SUSPEND_CONFIGURE_WAKE;
3711 }
3712
3713 hci_clear_wake_reason(hdev);
3714 mgmt_suspending(hdev, state);
3715
9952d90e 3716 } else if (action == PM_POST_SUSPEND) {
8731840a 3717 ret = hci_change_suspend_state(hdev, BT_RUNNING);
2f20216c
APS
3718
3719 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3720 hdev->wake_addr_type);
9952d90e
APS
3721 }
3722
3723done:
a9ec8423
APS
3724 /* We always allow suspend even if suspend preparation failed and
3725 * attempt to recover in resume.
3726 */
3727 if (ret)
3728 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3729 action, ret);
3730
24b06572 3731 return NOTIFY_DONE;
9952d90e 3732}
8731840a 3733
9be0dab7
DH
3734/* Alloc HCI device */
3735struct hci_dev *hci_alloc_dev(void)
3736{
3737 struct hci_dev *hdev;
3738
27f70f3e 3739 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
3740 if (!hdev)
3741 return NULL;
3742
b1b813d4
DH
3743 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3744 hdev->esco_type = (ESCO_HV1);
3745 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3746 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3747 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3748 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3749 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3750 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
d2609b34
FG
3751 hdev->adv_instance_cnt = 0;
3752 hdev->cur_adv_instance = 0x00;
5d900e46 3753 hdev->adv_instance_timeout = 0;
b1b813d4 3754
c4f1f408
HC
3755 hdev->advmon_allowlist_duration = 300;
3756 hdev->advmon_no_filter_duration = 500;
80af16a3 3757 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
c4f1f408 3758
b1b813d4
DH
3759 hdev->sniff_max_interval = 800;
3760 hdev->sniff_min_interval = 80;
3761
3f959d46 3762 hdev->le_adv_channel_map = 0x07;
628531c9
GL
3763 hdev->le_adv_min_interval = 0x0800;
3764 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
3765 hdev->le_scan_interval = 0x0060;
3766 hdev->le_scan_window = 0x0030;
10873f99
AM
3767 hdev->le_scan_int_suspend = 0x0400;
3768 hdev->le_scan_window_suspend = 0x0012;
3769 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3770 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
ba29d036
MH
3771 hdev->le_scan_int_adv_monitor = 0x0060;
3772 hdev->le_scan_window_adv_monitor = 0x0030;
10873f99
AM
3773 hdev->le_scan_int_connect = 0x0060;
3774 hdev->le_scan_window_connect = 0x0060;
b48c3b59
JH
3775 hdev->le_conn_min_interval = 0x0018;
3776 hdev->le_conn_max_interval = 0x0028;
04fb7d90
MH
3777 hdev->le_conn_latency = 0x0000;
3778 hdev->le_supv_timeout = 0x002a;
a8e1bfaa
MH
3779 hdev->le_def_tx_len = 0x001b;
3780 hdev->le_def_tx_time = 0x0148;
3781 hdev->le_max_tx_len = 0x001b;
3782 hdev->le_max_tx_time = 0x0148;
3783 hdev->le_max_rx_len = 0x001b;
3784 hdev->le_max_rx_time = 0x0148;
30d65e08
MK
3785 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3786 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
6decb5b4
JK
3787 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3788 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
1d0fac2c 3789 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
10873f99 3790 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
49b020c1 3791 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
7c395ea5
DW
3792 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3793 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
bef64738 3794
d6bfd59c 3795 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3796 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3797 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3798 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
302975cb 3799 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
58a96fc3 3800 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
d6bfd59c 3801
10873f99
AM
3802 /* default 1.28 sec page scan */
3803 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3804 hdev->def_page_scan_int = 0x0800;
3805 hdev->def_page_scan_window = 0x0012;
3806
b1b813d4
DH
3807 mutex_init(&hdev->lock);
3808 mutex_init(&hdev->req_lock);
3809
3810 INIT_LIST_HEAD(&hdev->mgmt_pending);
3811 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 3812 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
3813 INIT_LIST_HEAD(&hdev->uuids);
3814 INIT_LIST_HEAD(&hdev->link_keys);
3815 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3816 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3817 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3818 INIT_LIST_HEAD(&hdev->le_white_list);
cfdb0c2d 3819 INIT_LIST_HEAD(&hdev->le_resolv_list);
15819a70 3820 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3821 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3822 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3823 INIT_LIST_HEAD(&hdev->conn_hash.list);
d2609b34 3824 INIT_LIST_HEAD(&hdev->adv_instances);
600a8749 3825 INIT_LIST_HEAD(&hdev->blocked_keys);
b1b813d4
DH
3826
3827 INIT_WORK(&hdev->rx_work, hci_rx_work);
3828 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3829 INIT_WORK(&hdev->tx_work, hci_tx_work);
3830 INIT_WORK(&hdev->power_on, hci_power_on);
c7741d16 3831 INIT_WORK(&hdev->error_reset, hci_error_reset);
9952d90e 3832 INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
b1b813d4 3833
b1b813d4 3834 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
b1b813d4 3835
b1b813d4
DH
3836 skb_queue_head_init(&hdev->rx_q);
3837 skb_queue_head_init(&hdev->cmd_q);
3838 skb_queue_head_init(&hdev->raw_q);
3839
3840 init_waitqueue_head(&hdev->req_wait_q);
9952d90e 3841 init_waitqueue_head(&hdev->suspend_wait_q);
b1b813d4 3842
65cc2b49 3843 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3844
5fc16cc4
JH
3845 hci_request_setup(hdev);
3846
b1b813d4
DH
3847 hci_init_sysfs(hdev);
3848 discovery_init(hdev);
9be0dab7
DH
3849
3850 return hdev;
3851}
3852EXPORT_SYMBOL(hci_alloc_dev);
3853
3854/* Free HCI device */
3855void hci_free_dev(struct hci_dev *hdev)
3856{
9be0dab7
DH
3857 /* will free via device release */
3858 put_device(&hdev->dev);
3859}
3860EXPORT_SYMBOL(hci_free_dev);
3861
1da177e4
LT
3862/* Register HCI device */
3863int hci_register_dev(struct hci_dev *hdev)
3864{
b1b813d4 3865 int id, error;
1da177e4 3866
74292d5a 3867 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
3868 return -EINVAL;
3869
08add513
MM
3870 /* Do not allow HCI_AMP devices to register at index 0,
3871 * so the index can be used as the AMP controller ID.
3872 */
3df92b31 3873 switch (hdev->dev_type) {
ca8bee5d 3874 case HCI_PRIMARY:
3df92b31
SL
3875 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3876 break;
3877 case HCI_AMP:
3878 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3879 break;
3880 default:
3881 return -EINVAL;
1da177e4 3882 }
8e87d142 3883
3df92b31
SL
3884 if (id < 0)
3885 return id;
3886
1da177e4
LT
3887 sprintf(hdev->name, "hci%d", id);
3888 hdev->id = id;
2d8b3a11
AE
3889
3890 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3891
29e2dd0d 3892 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
33ca954d
DH
3893 if (!hdev->workqueue) {
3894 error = -ENOMEM;
3895 goto err;
3896 }
f48fd9c8 3897
29e2dd0d
TH
3898 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3899 hdev->name);
6ead1bbc
JH
3900 if (!hdev->req_workqueue) {
3901 destroy_workqueue(hdev->workqueue);
3902 error = -ENOMEM;
3903 goto err;
3904 }
3905
0153e2ec
MH
3906 if (!IS_ERR_OR_NULL(bt_debugfs))
3907 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3908
bdc3e0f1
MH
3909 dev_set_name(&hdev->dev, "%s", hdev->name);
3910
3911 error = device_add(&hdev->dev);
33ca954d 3912 if (error < 0)
54506918 3913 goto err_wqueue;
1da177e4 3914
6d5d2ee6
HK
3915 hci_leds_init(hdev);
3916
611b30f7 3917 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3918 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3919 hdev);
611b30f7
MH
3920 if (hdev->rfkill) {
3921 if (rfkill_register(hdev->rfkill) < 0) {
3922 rfkill_destroy(hdev->rfkill);
3923 hdev->rfkill = NULL;
3924 }
3925 }
3926
5e130367 3927 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
a1536da2 3928 hci_dev_set_flag(hdev, HCI_RFKILLED);
5e130367 3929
a1536da2
MH
3930 hci_dev_set_flag(hdev, HCI_SETUP);
3931 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
ce2be9ac 3932
ca8bee5d 3933 if (hdev->dev_type == HCI_PRIMARY) {
56f87901
JH
3934 /* Assume BR/EDR support until proven otherwise (such as
3935 * through reading supported features during init.
3936 */
a1536da2 3937 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
56f87901 3938 }
ce2be9ac 3939
fcee3377
GP
3940 write_lock(&hci_dev_list_lock);
3941 list_add(&hdev->list, &hci_dev_list);
3942 write_unlock(&hci_dev_list_lock);
3943
4a964404
MH
3944 /* Devices that are marked for raw-only usage are unconfigured
3945 * and should not be included in normal operation.
fee746b0
MH
3946 */
3947 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
a1536da2 3948 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
fee746b0 3949
05fcd4c4 3950 hci_sock_dev_event(hdev, HCI_DEV_REG);
dc946bd8 3951 hci_dev_hold(hdev);
1da177e4 3952
219991e6
HG
3953 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3954 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3955 error = register_pm_notifier(&hdev->suspend_notifier);
3956 if (error)
3957 goto err_wqueue;
3958 }
9952d90e 3959
19202573 3960 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3961
e5e1e7fd
MC
3962 idr_init(&hdev->adv_monitors_idr);
3963
1da177e4 3964 return id;
f48fd9c8 3965
33ca954d
DH
3966err_wqueue:
3967 destroy_workqueue(hdev->workqueue);
6ead1bbc 3968 destroy_workqueue(hdev->req_workqueue);
33ca954d 3969err:
3df92b31 3970 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3971
33ca954d 3972 return error;
1da177e4
LT
3973}
3974EXPORT_SYMBOL(hci_register_dev);
3975
3976/* Unregister HCI device */
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Flag the device first so concurrent work paths can bail out
	 * while teardown proceeds.
	 */
	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	/* Cache the id: the final hci_dev_put() below may free hdev,
	 * but the IDA slot must still be released afterwards.
	 */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
		hci_suspend_clear_tasks(hdev);
		unregister_pm_notifier(&hdev->suspend_notifier);
		cancel_work_sync(&hdev->suspend_prepare);
	}

	hci_dev_do_close(hdev);

	/* Only announce removal to mgmt if the device ever completed
	 * setup/config; otherwise userspace never saw this index.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Drop all remembered remote-device state under the dev lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken in hci_register_dev(); hdev may be
	 * freed here, so do not touch it afterwards.
	 */
	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4051
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies monitoring sockets; drivers handle their own
	 * transport suspend separately.
	 */
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
4059
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies monitoring sockets; mirror of hci_suspend_dev() */
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
4067
75e0569f
MH
4068/* Reset HCI device */
4069int hci_reset_dev(struct hci_dev *hdev)
4070{
1e4b6e91 4071 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
75e0569f
MH
4072 struct sk_buff *skb;
4073
4074 skb = bt_skb_alloc(3, GFP_ATOMIC);
4075 if (!skb)
4076 return -ENOMEM;
4077
d79f34e3 4078 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
59ae1d12 4079 skb_put_data(skb, hw_err, 3);
75e0569f
MH
4080
4081 /* Send Hardware Error to upper stack */
4082 return hci_recv_frame(hdev, skb);
4083}
4084EXPORT_SYMBOL(hci_reset_dev);
4085
76bca880 4086/* Receive frame from HCI drivers */
e1a26170 4087int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4088{
76bca880 4089 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4090 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4091 kfree_skb(skb);
4092 return -ENXIO;
4093 }
4094
d79f34e3
MH
4095 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4096 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
cc974003
MH
4097 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4098 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
fe806dce
MH
4099 kfree_skb(skb);
4100 return -EINVAL;
4101 }
4102
d82603c6 4103 /* Incoming skb */
76bca880
MH
4104 bt_cb(skb)->incoming = 1;
4105
4106 /* Time stamp */
4107 __net_timestamp(skb);
4108
76bca880 4109 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4110 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4111
76bca880
MH
4112 return 0;
4113}
4114EXPORT_SYMBOL(hci_recv_frame);
4115
e875ff84
MH
4116/* Receive diagnostic message from HCI drivers */
4117int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4118{
581d6fd6 4119 /* Mark as diagnostic packet */
d79f34e3 4120 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
581d6fd6 4121
e875ff84
MH
4122 /* Time stamp */
4123 __net_timestamp(skb);
4124
581d6fd6
MH
4125 skb_queue_tail(&hdev->rx_q, skb);
4126 queue_work(hdev->workqueue, &hdev->rx_work);
e875ff84 4127
e875ff84
MH
4128 return 0;
4129}
4130EXPORT_SYMBOL(hci_recv_diag);
4131
5177a838
MH
/* Set the device's hardware-information string (printf-style).
 * Any previously set string is freed first; the new one is allocated
 * with kvasprintf_const() so literal format strings are not duplicated.
 */
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);
4142
/* Set the device's firmware-information string (printf-style).
 * Mirror of hci_set_hw_info() for the firmware description.
 */
void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
4153
1da177e4
LT
4154/* ---- Interface to upper protocols ---- */
4155
1da177e4
LT
/* Register an upper-protocol callback set; always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
4167
/* Remove a previously registered upper-protocol callback set. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4179
/* Deliver one outgoing frame to the driver, mirroring copies to the
 * monitor channel and (in promiscuous mode) to raw sockets first.
 * Consumes the skb in all cases.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Drop silently once the transport is no longer running; the
	 * monitor copy above has already been taken.
	 */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
	}
}
4212
1ca3a9d0 4213/* Send HCI command */
07dc93dd
JH
4214int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4215 const void *param)
1ca3a9d0
JH
4216{
4217 struct sk_buff *skb;
4218
4219 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4220
4221 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4222 if (!skb) {
2064ee33 4223 bt_dev_err(hdev, "no memory for command");
1ca3a9d0
JH
4224 return -ENOMEM;
4225 }
4226
49c922bb 4227 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
4228 * single-command requests.
4229 */
44d27137 4230 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
11714b3d 4231
1da177e4 4232 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4233 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4234
4235 return 0;
4236}
1da177e4 4237
/* Send a vendor-specific HCI command without waiting for any event.
 * Bypasses the command queue and writes straight to the driver.
 */
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	/* OGF 0x3f is the vendor-specific group */
	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
4268
1da177e4 4269/* Get data from the previously sent command */
a9de9248 4270void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4271{
4272 struct hci_command_hdr *hdr;
4273
4274 if (!hdev->sent_cmd)
4275 return NULL;
4276
4277 hdr = (void *) hdev->sent_cmd->data;
4278
a9de9248 4279 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4280 return NULL;
4281
f0e09510 4282 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4283
4284 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4285}
4286
91641b79 4287/* Send HCI command and wait for command complete event */
fbef168f
LP
4288struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4289 const void *param, u32 timeout)
4290{
4291 struct sk_buff *skb;
4292
4293 if (!test_bit(HCI_UP, &hdev->flags))
4294 return ERR_PTR(-ENETDOWN);
4295
4296 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4297
b504430c 4298 hci_req_sync_lock(hdev);
fbef168f 4299 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
b504430c 4300 hci_req_sync_unlock(hdev);
fbef168f
LP
4301
4302 return skb;
4303}
4304EXPORT_SYMBOL(hci_cmd_sync);
4305
1da177e4
LT
/* Send ACL data */
/* Prepend an ACL header (handle+flags, little-endian length) to skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
4318
ee22be7e 4319static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4320 struct sk_buff *skb, __u16 flags)
1da177e4 4321{
ee22be7e 4322 struct hci_conn *conn = chan->conn;
1da177e4
LT
4323 struct hci_dev *hdev = conn->hdev;
4324 struct sk_buff *list;
4325
087bfd99
GP
4326 skb->len = skb_headlen(skb);
4327 skb->data_len = 0;
4328
d79f34e3 4329 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
204a6e54
AE
4330
4331 switch (hdev->dev_type) {
ca8bee5d 4332 case HCI_PRIMARY:
204a6e54
AE
4333 hci_add_acl_hdr(skb, conn->handle, flags);
4334 break;
4335 case HCI_AMP:
4336 hci_add_acl_hdr(skb, chan->handle, flags);
4337 break;
4338 default:
2064ee33 4339 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
204a6e54
AE
4340 return;
4341 }
087bfd99 4342
70f23020
AE
4343 list = skb_shinfo(skb)->frag_list;
4344 if (!list) {
1da177e4
LT
4345 /* Non fragmented */
4346 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4347
73d80deb 4348 skb_queue_tail(queue, skb);
1da177e4
LT
4349 } else {
4350 /* Fragmented */
4351 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4352
4353 skb_shinfo(skb)->frag_list = NULL;
4354
9cfd5a23
JR
4355 /* Queue all fragments atomically. We need to use spin_lock_bh
4356 * here because of 6LoWPAN links, as there this function is
4357 * called from softirq and using normal spin lock could cause
4358 * deadlocks.
4359 */
4360 spin_lock_bh(&queue->lock);
1da177e4 4361
73d80deb 4362 __skb_queue_tail(queue, skb);
e702112f
AE
4363
4364 flags &= ~ACL_START;
4365 flags |= ACL_CONT;
1da177e4
LT
4366 do {
4367 skb = list; list = list->next;
8e87d142 4368
d79f34e3 4369 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
e702112f 4370 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4371
4372 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4373
73d80deb 4374 __skb_queue_tail(queue, skb);
1da177e4
LT
4375 } while (list);
4376
9cfd5a23 4377 spin_unlock_bh(&queue->lock);
1da177e4 4378 }
73d80deb
LAD
4379}
4380
/* Queue ACL data on the channel and kick the TX worker. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
4391
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the SCO header on the stack, then prepend it */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	/* Actual transmission happens in the TX work item */
	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
4412
4413/* ---- HCI TX task (outgoing data) ---- */
4414
/* HCI Connection scheduler */
/* Pick the connection of @type with the fewest in-flight packets and
 * pending data; compute its fair-share quote from the free buffer count.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins: simple fairness */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE may share the ACL buffer pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			bt_dev_err(hdev, "unknown link type %d", conn->type);
		}

		/* Share buffers evenly, but always allow at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4475
6039aa73 4476static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4477{
4478 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4479 struct hci_conn *c;
1da177e4 4480
2064ee33 4481 bt_dev_err(hdev, "link tx timeout");
1da177e4 4482
bf4c6325
GP
4483 rcu_read_lock();
4484
1da177e4 4485 /* Kill stalled connections */
bf4c6325 4486 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4487 if (c->type == type && c->sent) {
2064ee33
MH
4488 bt_dev_err(hdev, "killing stalled connection %pMR",
4489 &c->dst);
bed71748 4490 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4491 }
4492 }
bf4c6325
GP
4493
4494 rcu_read_unlock();
1da177e4
LT
4495}
4496
/* Channel-level scheduler: among all channels of link @type, pick the
 * one whose head packet has the highest priority, breaking ties by the
 * connection with the fewest packets in flight. Returns the chosen
 * channel and its buffer quote, or NULL when nothing is pending.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the queue head matters: packets behind it
			 * cannot be sent first anyway.
			 */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
	}

	/* Fair share of the free buffers, minimum one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4578
/* Anti-starvation pass: promote the head packet of every idle channel
 * (one that sent nothing in the last round) to just below HCI_PRIO_MAX,
 * and reset per-round sent counters for channels that did transmit.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send this round: just reset the
			 * counter, no promotion needed.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4628
b71d385a
AE
4629static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4630{
4631 /* Calculate count of blocks used by this packet */
4632 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4633}
4634
6039aa73 4635static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4636{
d7a5a11d 4637 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1da177e4
LT
4638 /* ACL tx timeout must be longer than maximum
4639 * link supervision timeout (40.9 seconds) */
63d2bc1b 4640 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4641 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4642 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4643 }
63d2bc1b 4644}
1da177e4 4645
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	/* Drain up to the fair quote per connection while SCO buffers
	 * remain; sco_cnt is decremented on completion events elsewhere.
	 */
	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4669
/* Schedule eSCO: identical to hci_sched_sco() but for ESCO_LINK
 * connections (the two link types share the sco_cnt buffer pool).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4693
6039aa73 4694static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4695{
4696 unsigned int cnt = hdev->acl_cnt;
4697 struct hci_chan *chan;
4698 struct sk_buff *skb;
4699 int quote;
4700
4701 __check_timeout(hdev, cnt);
04837f64 4702
73d80deb 4703 while (hdev->acl_cnt &&
a8c5fb1a 4704 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4705 u32 priority = (skb_peek(&chan->data_q))->priority;
4706 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4707 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4708 skb->len, skb->priority);
73d80deb 4709
ec1cce24
LAD
4710 /* Stop if priority has changed */
4711 if (skb->priority < priority)
4712 break;
4713
4714 skb = skb_dequeue(&chan->data_q);
4715
73d80deb 4716 hci_conn_enter_active_mode(chan->conn,
04124681 4717 bt_cb(skb)->force_active);
04837f64 4718
57d17d70 4719 hci_send_frame(hdev, skb);
1da177e4
LT
4720 hdev->acl_last_tx = jiffies;
4721
4722 hdev->acl_cnt--;
73d80deb
LAD
4723 chan->sent++;
4724 chan->conn->sent++;
7fedd3bb
APS
4725
4726 /* Send pending SCO packets right away */
4727 hci_sched_sco(hdev);
4728 hci_sched_esco(hdev);
1da177e4
LT
4729 }
4730 }
02b20f0b
LAD
4731
4732 if (cnt != hdev->acl_cnt)
4733 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4734}
4735
6039aa73 4736static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4737{
63d2bc1b 4738 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4739 struct hci_chan *chan;
4740 struct sk_buff *skb;
4741 int quote;
bd1eb66b 4742 u8 type;
b71d385a 4743
63d2bc1b 4744 __check_timeout(hdev, cnt);
b71d385a 4745
bd1eb66b
AE
4746 BT_DBG("%s", hdev->name);
4747
4748 if (hdev->dev_type == HCI_AMP)
4749 type = AMP_LINK;
4750 else
4751 type = ACL_LINK;
4752
b71d385a 4753 while (hdev->block_cnt > 0 &&
bd1eb66b 4754 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4755 u32 priority = (skb_peek(&chan->data_q))->priority;
4756 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4757 int blocks;
4758
4759 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4760 skb->len, skb->priority);
b71d385a
AE
4761
4762 /* Stop if priority has changed */
4763 if (skb->priority < priority)
4764 break;
4765
4766 skb = skb_dequeue(&chan->data_q);
4767
4768 blocks = __get_blocks(hdev, skb);
4769 if (blocks > hdev->block_cnt)
4770 return;
4771
4772 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4773 bt_cb(skb)->force_active);
b71d385a 4774
57d17d70 4775 hci_send_frame(hdev, skb);
b71d385a
AE
4776 hdev->acl_last_tx = jiffies;
4777
4778 hdev->block_cnt -= blocks;
4779 quote -= blocks;
4780
4781 chan->sent += blocks;
4782 chan->conn->sent += blocks;
4783 }
4784 }
4785
4786 if (cnt != hdev->block_cnt)
bd1eb66b 4787 hci_prio_recalculate(hdev, type);
b71d385a
AE
4788}
4789
6039aa73 4790static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4791{
4792 BT_DBG("%s", hdev->name);
4793
bd1eb66b 4794 /* No ACL link over BR/EDR controller */
ca8bee5d 4795 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
bd1eb66b
AE
4796 return;
4797
4798 /* No AMP link over AMP controller */
4799 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4800 return;
4801
4802 switch (hdev->flow_ctl_mode) {
4803 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4804 hci_sched_acl_pkt(hdev);
4805 break;
4806
4807 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4808 hci_sched_acl_blk(hdev);
4809 break;
4810 }
4811}
4812
6039aa73 4813static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4814{
73d80deb 4815 struct hci_chan *chan;
6ed58ec5 4816 struct sk_buff *skb;
02b20f0b 4817 int quote, cnt, tmp;
6ed58ec5
VT
4818
4819 BT_DBG("%s", hdev->name);
4820
52087a79
LAD
4821 if (!hci_conn_num(hdev, LE_LINK))
4822 return;
4823
6ed58ec5 4824 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
1b1d29e5
LAD
4825
4826 __check_timeout(hdev, cnt);
4827
02b20f0b 4828 tmp = cnt;
73d80deb 4829 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4830 u32 priority = (skb_peek(&chan->data_q))->priority;
4831 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4832 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4833 skb->len, skb->priority);
6ed58ec5 4834
ec1cce24
LAD
4835 /* Stop if priority has changed */
4836 if (skb->priority < priority)
4837 break;
4838
4839 skb = skb_dequeue(&chan->data_q);
4840
57d17d70 4841 hci_send_frame(hdev, skb);
6ed58ec5
VT
4842 hdev->le_last_tx = jiffies;
4843
4844 cnt--;
73d80deb
LAD
4845 chan->sent++;
4846 chan->conn->sent++;
7fedd3bb
APS
4847
4848 /* Send pending SCO packets right away */
4849 hci_sched_sco(hdev);
4850 hci_sched_esco(hdev);
6ed58ec5
VT
4851 }
4852 }
73d80deb 4853
6ed58ec5
VT
4854 if (hdev->le_pkts)
4855 hdev->le_cnt = cnt;
4856 else
4857 hdev->acl_cnt = cnt;
02b20f0b
LAD
4858
4859 if (cnt != tmp)
4860 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4861}
4862
3eff45ea 4863static void hci_tx_work(struct work_struct *work)
1da177e4 4864{
3eff45ea 4865 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4866 struct sk_buff *skb;
4867
6ed58ec5 4868 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4869 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4870
d7a5a11d 4871 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
52de599e 4872 /* Schedule queues and send stuff to HCI driver */
52de599e
MH
4873 hci_sched_sco(hdev);
4874 hci_sched_esco(hdev);
7fedd3bb 4875 hci_sched_acl(hdev);
52de599e
MH
4876 hci_sched_le(hdev);
4877 }
6ed58ec5 4878
1da177e4
LT
4879 /* Send next queued raw (unknown type) packet */
4880 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4881 hci_send_frame(hdev, skb);
1da177e4
LT
4882}
4883
25985edc 4884/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4885
/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* Split the 16-bit field into connection handle and PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol; l2cap takes skb ownership */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}
4921
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		/* Low two flag bits carry the packet status (erroneous
		 * data reporting) for the SCO socket layer.
		 */
		bt_cb(skb)->sco.pkt_status = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}
4956
9238f36a
JH
4957static bool hci_req_is_complete(struct hci_dev *hdev)
4958{
4959 struct sk_buff *skb;
4960
4961 skb = skb_peek(&hdev->cmd_q);
4962 if (!skb)
4963 return true;
4964
44d27137 4965 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
9238f36a
JH
4966}
4967
/* Re-queue a clone of the last sent command at the head of cmd_q.
 * Used when a controller spontaneously resets and loses the command.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	/* Never resend HCI_Reset itself: that would loop */
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	/* Head of the queue so it goes out before anything else */
	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
4989
/* Resolve the completion callback(s) for the command identified by
 * @opcode/@status. On success *req_complete or *req_complete_skb is
 * set; on failure any remaining commands of the same request are
 * dropped from cmd_q.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* A new request begins here: put its first command back */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
5054
b78752cc 5055static void hci_rx_work(struct work_struct *work)
1da177e4 5056{
b78752cc 5057 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5058 struct sk_buff *skb;
5059
5060 BT_DBG("%s", hdev->name);
5061
1da177e4 5062 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5063 /* Send copy to monitor */
5064 hci_send_to_monitor(hdev, skb);
5065
1da177e4
LT
5066 if (atomic_read(&hdev->promisc)) {
5067 /* Send copy to the sockets */
470fe1b5 5068 hci_send_to_sock(hdev, skb);
1da177e4
LT
5069 }
5070
eb8c101e
MK
5071 /* If the device has been opened in HCI_USER_CHANNEL,
5072 * the userspace has exclusive access to device.
5073 * When device is HCI_INIT, we still need to process
5074 * the data packets to the driver in order
5075 * to complete its setup().
5076 */
5077 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5078 !test_bit(HCI_INIT, &hdev->flags)) {
1da177e4
LT
5079 kfree_skb(skb);
5080 continue;
5081 }
5082
5083 if (test_bit(HCI_INIT, &hdev->flags)) {
5084 /* Don't process data packets in this states. */
d79f34e3 5085 switch (hci_skb_pkt_type(skb)) {
1da177e4
LT
5086 case HCI_ACLDATA_PKT:
5087 case HCI_SCODATA_PKT:
cc974003 5088 case HCI_ISODATA_PKT:
1da177e4
LT
5089 kfree_skb(skb);
5090 continue;
3ff50b79 5091 }
1da177e4
LT
5092 }
5093
5094 /* Process frame */
d79f34e3 5095 switch (hci_skb_pkt_type(skb)) {
1da177e4 5096 case HCI_EVENT_PKT:
b78752cc 5097 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5098 hci_event_packet(hdev, skb);
5099 break;
5100
5101 case HCI_ACLDATA_PKT:
5102 BT_DBG("%s ACL data packet", hdev->name);
5103 hci_acldata_packet(hdev, skb);
5104 break;
5105
5106 case HCI_SCODATA_PKT:
5107 BT_DBG("%s SCO data packet", hdev->name);
5108 hci_scodata_packet(hdev, skb);
5109 break;
5110
5111 default:
5112 kfree_skb(skb);
5113 break;
5114 }
5115 }
1da177e4
LT
5116}
5117
c347b765 5118static void hci_cmd_work(struct work_struct *work)
1da177e4 5119{
c347b765 5120 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5121 struct sk_buff *skb;
5122
2104786b
AE
5123 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5124 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5125
1da177e4 5126 /* Send queued commands */
5a08ecce
AE
5127 if (atomic_read(&hdev->cmd_cnt)) {
5128 skb = skb_dequeue(&hdev->cmd_q);
5129 if (!skb)
5130 return;
5131
7585b97a 5132 kfree_skb(hdev->sent_cmd);
1da177e4 5133
a675d7f1 5134 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5135 if (hdev->sent_cmd) {
f80c5dad
JPRV
5136 if (hci_req_status_pend(hdev))
5137 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
1da177e4 5138 atomic_dec(&hdev->cmd_cnt);
57d17d70 5139 hci_send_frame(hdev, skb);
7bdb8a5c 5140 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5141 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5142 else
65cc2b49
MH
5143 schedule_delayed_work(&hdev->cmd_timer,
5144 HCI_CMD_TIMEOUT);
1da177e4
LT
5145 } else {
5146 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5147 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5148 }
5149 }
5150}