Bluetooth: hci_qca: Remove duplicate power off in proto close
[linux-2.6-block.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
8c520a59 29#include <linux/rfkill.h>
baf27f6e 30#include <linux/debugfs.h>
99780a7b 31#include <linux/crypto.h>
7a0e5b15 32#include <linux/property.h>
9952d90e
APS
33#include <linux/suspend.h>
34#include <linux/wait.h>
47219839 35#include <asm/unaligned.h>
1da177e4
LT
36
37#include <net/bluetooth/bluetooth.h>
38#include <net/bluetooth/hci_core.h>
4bc58f51 39#include <net/bluetooth/l2cap.h>
af58925c 40#include <net/bluetooth/mgmt.h>
1da177e4 41
0857dd3b 42#include "hci_request.h"
60c5f5fb 43#include "hci_debugfs.h"
970c4e46 44#include "smp.h"
6d5d2ee6 45#include "leds.h"
145373cb 46#include "msft.h"
970c4e46 47
b78752cc 48static void hci_rx_work(struct work_struct *work);
c347b765 49static void hci_cmd_work(struct work_struct *work);
3eff45ea 50static void hci_tx_work(struct work_struct *work);
1da177e4 51
1da177e4
LT
52/* HCI device list */
53LIST_HEAD(hci_dev_list);
54DEFINE_RWLOCK(hci_dev_list_lock);
55
56/* HCI callback list */
57LIST_HEAD(hci_cb_list);
fba7ecf0 58DEFINE_MUTEX(hci_cb_list_lock);
1da177e4 59
3df92b31
SL
60/* HCI ID Numbering */
61static DEFINE_IDA(hci_index_ida);
62
baf27f6e
MH
63/* ---- HCI debugfs entries ---- */
64
4b4148e9
MH
65static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
66 size_t count, loff_t *ppos)
67{
68 struct hci_dev *hdev = file->private_data;
69 char buf[3];
70
74b93e9f 71 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
4b4148e9
MH
72 buf[1] = '\n';
73 buf[2] = '\0';
74 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
75}
76
/* debugfs write for "dut_mode": parse a boolean from userspace and enable
 * or disable Device Under Test mode on the controller.
 *
 * Enabling issues HCI_OP_ENABLE_DUT_MODE; disabling issues a plain HCI
 * reset (the only way out of DUT mode). Returns @count on success or a
 * negative errno.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	bool enable;
	int err;

	/* The controller must be powered up before a command can be sent */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	/* No-op writes are rejected rather than silently accepted */
	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	/* Serialize against other synchronous HCI request users */
	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* The command-complete skb carries no data we need; discard it */
	kfree_skb(skb);

	/* Toggle the flag only after the command succeeded */
	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}
113
/* File operations for the "dut_mode" debugfs entry */
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
120
4b4113d6
MH
121static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
122 size_t count, loff_t *ppos)
123{
124 struct hci_dev *hdev = file->private_data;
125 char buf[3];
126
74b93e9f 127 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
4b4113d6
MH
128 buf[1] = '\n';
129 buf[2] = '\0';
130 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
131}
132
/* debugfs write for "vendor_diag": parse a boolean from userspace and
 * enable or disable vendor-specific diagnostics via the driver's
 * set_diag callback, then mirror the result in the HCI_VENDOR_DIAG flag.
 *
 * Returns @count on success or a negative errno.
 */
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active or in user channel operation, then there is no need
	 * for the vendor callback. Instead just store the desired value and
	 * the setting will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
		goto done;

	/* Serialize the vendor callback with other synchronous requests */
	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	/* Record the (possibly deferred) setting in the device flags */
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}
169
/* File operations for the "vendor_diag" debugfs entry */
static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};
176
f640ee98
MH
/* Create the debugfs entries that are valid for every controller:
 * "dut_mode" always, and "vendor_diag" only when the driver provides
 * a set_diag callback.
 */
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}
186
/* Request builder: queue an HCI_Reset command and mark the device as
 * resetting via the HCI_RESET flag. Always returns 0.
 */
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
	return 0;
}
196
/* Stage-1 init for BR/EDR (primary) controllers: select packet-based
 * flow control and queue the basic identity reads.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
210
/* Stage-1 init for AMP controllers: select block-based flow control and
 * queue the reads every AMP controller must support.
 */
static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
233
/* Stage-2 init for AMP controllers. Always returns 0. */
static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}
245
/* First init stage: optionally reset the controller, then dispatch to
 * the type-specific stage-1 builder (BR/EDR primary vs AMP).
 * Always returns 0; an unknown device type is only logged.
 */
static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}
270
/* Stage-2 BR/EDR setup: queue the reads and writes common to all BR/EDR
 * capable controllers (buffer sizes, identity, IAC, event filter,
 * connection accept timeout).
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
302
/* Stage-2 LE setup: queue the LE capability reads and implicitly enable
 * LE on single-mode (LE-only) controllers.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
320
/* Build and queue the HCI Set Event Mask command, deriving the mask from
 * the controller's LMP features and supported-commands bitmap so only
 * events the controller can actually generate are unmasked.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
416
/* Second init stage: AMP controllers take the short path via amp_init2();
 * primary controllers get BR/EDR and/or LE setup plus the common reads
 * and writes that depend on the features learned in stage 1.
 * Always returns 0.
 */
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stored EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}
500
42c6b129 501static void hci_setup_link_policy(struct hci_request *req)
2177bab5 502{
42c6b129 503 struct hci_dev *hdev = req->hdev;
2177bab5
JH
504 struct hci_cp_write_def_link_policy cp;
505 u16 link_policy = 0;
506
507 if (lmp_rswitch_capable(hdev))
508 link_policy |= HCI_LP_RSWITCH;
509 if (lmp_hold_capable(hdev))
510 link_policy |= HCI_LP_HOLD;
511 if (lmp_sniff_capable(hdev))
512 link_policy |= HCI_LP_SNIFF;
513 if (lmp_park_capable(hdev))
514 link_policy |= HCI_LP_PARK;
515
516 cp.policy = cpu_to_le16(link_policy);
42c6b129 517 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
518}
519
/* Queue a Write LE Host Supported command on dual-mode controllers when
 * the host's desired LE setting differs from what the controller
 * currently reports. LE-only controllers are skipped entirely.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	/* Only send the command if the setting actually changes */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
540
d62e6d67
JH
/* Build and, only when non-default bits are needed, queue the Set Event
 * Mask Page 2 command for CSB and Authenticated Payload Timeout events.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (changed)
		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
			    sizeof(events), events);
}
585
/* Third init stage (primary controllers only): program the event mask,
 * issue the feature-dependent BR/EDR reads, configure the LE event mask
 * and LE state reads, and fetch extended feature pages beyond page 1.
 * Always returns 0.
 */
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	if (hdev->commands[18] & 0x04 &&
	    !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40; /* LE Data Length Change */

		/* If the controller supports LL Privacy feature, enable
		 * the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
			events[1] |= 0x02; /* LE Enhanced Connection
					    * Complete
					    */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04; /* LE Direct Advertising
					    * Report
					    */

		/* If the controller supports Channel Selection Algorithm #2
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
			events[2] |= 0x08; /* LE Channel Selection
					    * Algorithm
					    */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02; /* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01; /* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04; /* LE Connection Update
					    * Complete
					    */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08; /* LE Read Remote Used
					    * Features Complete
					    */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80; /* LE Read Local P-256
					    * Public Key Complete
					    */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01; /* LE Generate DHKey Complete */

		/* If the controller supports the LE Set Default PHY or
		 * LE Set PHY commands, enable the corresponding event.
		 */
		if (hdev->commands[35] & (0x20 | 0x40))
			events[1] |= 0x08; /* LE PHY Update Complete */

		/* If the controller supports LE Set Extended Scan Parameters
		 * and LE Set Extended Scan Enable commands, enable the
		 * corresponding event.
		 */
		if (use_ext_scan(hdev))
			events[1] |= 0x10; /* LE Extended Advertising
					    * Report
					    */

		/* If the controller supports the LE Extended Advertising
		 * command, enable the corresponding event.
		 */
		if (ext_adv_capable(hdev))
			events[2] |= 0x02; /* LE Advertising Set
					    * Terminated
					    */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		/* Read LE Advertising Channel TX Power */
		if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
			/* HCI TS spec forbids mixing of legacy and extended
			 * advertising commands wherein READ_ADV_TX_POWER is
			 * also included. So do not call it if extended adv
			 * is supported otherwise controller will return
			 * COMMAND_DISALLOWED for extended commands.
			 */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->commands[34] & 0x40) {
			/* Read LE Resolving List Size */
			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[34] & 0x20) {
			/* Clear LE Resolving List */
			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
		}

		if (hdev->commands[35] & 0x40) {
			__le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);

			/* Set RPA timeout */
			hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
				    &rpa_timeout);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		if (ext_adv_capable(hdev)) {
			/* Read LE Number of Supported Advertising Sets */
			hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
				    0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}
802
/* Fourth init stage: quirky or optional commands that must run after the
 * supported-commands bitmap and feature pages are known (stored link key
 * deletion, event mask page 2, codecs, MWS, secure connections, data
 * length and default PHY). Always returns 0.
 */
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Read local pairing options if the HCI command is supported */
	if (hdev->commands[41] & 0x08)
		hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set erroneous data reporting if supported to the wideband speech
	 * setting value
	 */
	if (hdev->commands[18] & 0x08 &&
	    !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
		bool enabled = hci_dev_test_flag(hdev,
						 HCI_WIDEBAND_SPEECH_ENABLED);

		if (enabled !=
		    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
			struct hci_cp_write_def_err_data_reporting cp;

			cp.err_data_reporting = enabled ?
						ERR_DATA_REPORTING_ENABLED :
						ERR_DATA_REPORTING_DISABLED;

			hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
				    sizeof(cp), &cp);
		}
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
		cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	/* Set Default PHY parameters if command is supported */
	if (hdev->commands[35] & 0x20) {
		struct hci_cp_le_set_default_phy cp;

		cp.all_phys = 0x00;
		cp.tx_phys = hdev->le_tx_def_phys;
		cp.rx_phys = hdev->le_rx_def_phys;

		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
	}

	return 0;
}
902
2177bab5
JH
/* Run the full synchronous init sequence for a configured controller:
 * stages 1-2 for every controller, stages 3-4 for primary controllers
 * only, then create the debugfs entries during setup/config phase.
 * Returns 0 on success or the first stage's negative error.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
959
/* Minimal "stage 0" init request used for unconfigured controllers:
 * optionally reset, then read the local version and, when the driver
 * can change it, the BD address. Always returns 0; the commands are
 * queued on @req and executed by the request framework.
 */
static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}
979
/* Run the reduced init sequence for an unconfigured controller.
 * Raw devices are left completely untouched. Returns 0 on success or
 * the negative error from the synchronous request.
 */
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* Same as in __hci_init(): basic debugfs entries only while in
	 * the setup phase.
	 */
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}
996
a1d01db1 997static int hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
998{
999 __u8 scan = opt;
1000
42c6b129 1001 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1002
1003 /* Inquiry and Page scans */
42c6b129 1004 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
a1d01db1 1005 return 0;
1da177e4
LT
1006}
1007
a1d01db1 1008static int hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1009{
1010 __u8 auth = opt;
1011
42c6b129 1012 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1013
1014 /* Authentication */
42c6b129 1015 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
a1d01db1 1016 return 0;
1da177e4
LT
1017}
1018
a1d01db1 1019static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1020{
1021 __u8 encrypt = opt;
1022
42c6b129 1023 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1024
e4e8e37c 1025 /* Encryption */
42c6b129 1026 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
a1d01db1 1027 return 0;
1da177e4
LT
1028}
1029
a1d01db1 1030static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1031{
1032 __le16 policy = cpu_to_le16(opt);
1033
42c6b129 1034 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1035
1036 /* Default link policy */
42c6b129 1037 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
a1d01db1 1038 return 0;
e4e8e37c
MH
1039}
1040
8e87d142 1041/* Get HCI device by index.
1da177e4
LT
1042 * Device is held on return. */
1043struct hci_dev *hci_dev_get(int index)
1044{
8035ded4 1045 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1046
1047 BT_DBG("%d", index);
1048
1049 if (index < 0)
1050 return NULL;
1051
1052 read_lock(&hci_dev_list_lock);
8035ded4 1053 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1054 if (d->id == index) {
1055 hdev = hci_dev_hold(d);
1056 break;
1057 }
1058 }
1059 read_unlock(&hci_dev_list_lock);
1060 return hdev;
1061}
1da177e4
LT
1062
1063/* ---- Inquiry support ---- */
ff9ef578 1064
30dc78e1
JH
1065bool hci_discovery_active(struct hci_dev *hdev)
1066{
1067 struct discovery_state *discov = &hdev->discovery;
1068
6fbe195d 1069 switch (discov->state) {
343f935b 1070 case DISCOVERY_FINDING:
6fbe195d 1071 case DISCOVERY_RESOLVING:
30dc78e1
JH
1072 return true;
1073
6fbe195d
AG
1074 default:
1075 return false;
1076 }
30dc78e1
JH
1077}
1078
/* Transition the discovery state machine and emit the matching mgmt
 * Discovering events. A transition to the current state is a no-op.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		/* Coming from STARTING means discovery never actually
		 * began, so no "discovering stopped" event is sent.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
1108
1f9b9a5d 1109void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1110{
30883512 1111 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1112 struct inquiry_entry *p, *n;
1da177e4 1113
561aafbc
JH
1114 list_for_each_entry_safe(p, n, &cache->all, all) {
1115 list_del(&p->all);
b57c1a56 1116 kfree(p);
1da177e4 1117 }
561aafbc
JH
1118
1119 INIT_LIST_HEAD(&cache->unknown);
1120 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1121}
1122
a8c5fb1a
GP
1123struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1124 bdaddr_t *bdaddr)
1da177e4 1125{
30883512 1126 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1127 struct inquiry_entry *e;
1128
6ed93dc6 1129 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1130
561aafbc
JH
1131 list_for_each_entry(e, &cache->all, all) {
1132 if (!bacmp(&e->data.bdaddr, bdaddr))
1133 return e;
1134 }
1135
1136 return NULL;
1137}
1138
1139struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1140 bdaddr_t *bdaddr)
561aafbc 1141{
30883512 1142 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1143 struct inquiry_entry *e;
1144
6ed93dc6 1145 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1146
1147 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1148 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1149 return e;
1150 }
1151
1152 return NULL;
1da177e4
LT
1153}
1154
30dc78e1 1155struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1156 bdaddr_t *bdaddr,
1157 int state)
30dc78e1
JH
1158{
1159 struct discovery_state *cache = &hdev->discovery;
1160 struct inquiry_entry *e;
1161
6ed93dc6 1162 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1163
1164 list_for_each_entry(e, &cache->resolve, list) {
1165 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1166 return e;
1167 if (!bacmp(&e->data.bdaddr, bdaddr))
1168 return e;
1169 }
1170
1171 return NULL;
1172}
1173
a3d4e20a 1174void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 1175 struct inquiry_entry *ie)
a3d4e20a
JH
1176{
1177 struct discovery_state *cache = &hdev->discovery;
1178 struct list_head *pos = &cache->resolve;
1179 struct inquiry_entry *p;
1180
1181 list_del(&ie->list);
1182
1183 list_for_each_entry(p, &cache->resolve, list) {
1184 if (p->name_state != NAME_PENDING &&
a8c5fb1a 1185 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
1186 break;
1187 pos = &p->list;
1188 }
1189
1190 list_add(&ie->list, pos);
1191}
1192
/* Insert or refresh an inquiry cache entry for a discovered device.
 *
 * Returns MGMT_DEV_FOUND_* flags describing the device: legacy pairing
 * when SSP is not supported, and confirm-name when the remote name is
 * still unknown (also set when allocation fails, so userspace keeps
 * trying to resolve the name).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* A changed RSSI for an entry awaiting name resolution
		 * re-sorts it within the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an existing entry to NAME_KNOWN (and drop it from the
	 * unknown/resolve list) once the name has been learned, unless
	 * resolution is already pending.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1254
1255static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1256{
30883512 1257 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1258 struct inquiry_info *info = (struct inquiry_info *) buf;
1259 struct inquiry_entry *e;
1260 int copied = 0;
1261
561aafbc 1262 list_for_each_entry(e, &cache->all, all) {
1da177e4 1263 struct inquiry_data *data = &e->data;
b57c1a56
JH
1264
1265 if (copied >= num)
1266 break;
1267
1da177e4
LT
1268 bacpy(&info->bdaddr, &data->bdaddr);
1269 info->pscan_rep_mode = data->pscan_rep_mode;
1270 info->pscan_period_mode = data->pscan_period_mode;
1271 info->pscan_mode = data->pscan_mode;
1272 memcpy(info->dev_class, data->dev_class, 3);
1273 info->clock_offset = data->clock_offset;
b57c1a56 1274
1da177e4 1275 info++;
b57c1a56 1276 copied++;
1da177e4
LT
1277 }
1278
1279 BT_DBG("cache %p, copied %d", cache, copied);
1280 return copied;
1281}
1282
/* Queue an HCI Inquiry command built from the hci_inquiry_req passed
 * via @opt. Does nothing if an inquiry is already in progress.
 */
static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
1302
1303int hci_inquiry(void __user *arg)
1304{
1305 __u8 __user *ptr = arg;
1306 struct hci_inquiry_req ir;
1307 struct hci_dev *hdev;
1308 int err = 0, do_inquiry = 0, max_rsp;
1309 long timeo;
1310 __u8 *buf;
1311
1312 if (copy_from_user(&ir, ptr, sizeof(ir)))
1313 return -EFAULT;
1314
5a08ecce
AE
1315 hdev = hci_dev_get(ir.dev_id);
1316 if (!hdev)
1da177e4
LT
1317 return -ENODEV;
1318
d7a5a11d 1319 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1320 err = -EBUSY;
1321 goto done;
1322 }
1323
d7a5a11d 1324 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1325 err = -EOPNOTSUPP;
1326 goto done;
1327 }
1328
ca8bee5d 1329 if (hdev->dev_type != HCI_PRIMARY) {
5b69bef5
MH
1330 err = -EOPNOTSUPP;
1331 goto done;
1332 }
1333
d7a5a11d 1334 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
1335 err = -EOPNOTSUPP;
1336 goto done;
1337 }
1338
09fd0de5 1339 hci_dev_lock(hdev);
8e87d142 1340 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1341 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1342 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1343 do_inquiry = 1;
1344 }
09fd0de5 1345 hci_dev_unlock(hdev);
1da177e4 1346
04837f64 1347 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1348
1349 if (do_inquiry) {
01178cd4 1350 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
4ebeee2d 1351 timeo, NULL);
70f23020
AE
1352 if (err < 0)
1353 goto done;
3e13fa1e
AG
1354
1355 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1356 * cleared). If it is interrupted by a signal, return -EINTR.
1357 */
74316201 1358 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
3e13fa1e
AG
1359 TASK_INTERRUPTIBLE))
1360 return -EINTR;
70f23020 1361 }
1da177e4 1362
8fc9ced3
GP
1363 /* for unlimited number of responses we will use buffer with
1364 * 255 entries
1365 */
1da177e4
LT
1366 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1367
1368 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1369 * copy it to the user space.
1370 */
6da2ec56 1371 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
70f23020 1372 if (!buf) {
1da177e4
LT
1373 err = -ENOMEM;
1374 goto done;
1375 }
1376
09fd0de5 1377 hci_dev_lock(hdev);
1da177e4 1378 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1379 hci_dev_unlock(hdev);
1da177e4
LT
1380
1381 BT_DBG("num_rsp %d", ir.num_rsp);
1382
1383 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1384 ptr += sizeof(ir);
1385 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1386 ir.num_rsp))
1da177e4 1387 err = -EFAULT;
8e87d142 1388 } else
1da177e4
LT
1389 err = -EFAULT;
1390
1391 kfree(buf);
1392
1393done:
1394 hci_dev_put(hdev);
1395 return err;
1396}
1397
7a0e5b15
MK
1398/**
1399 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1400 * (BD_ADDR) for a HCI device from
1401 * a firmware node property.
1402 * @hdev: The HCI device
1403 *
1404 * Search the firmware node for 'local-bd-address'.
1405 *
1406 * All-zero BD addresses are rejected, because those could be properties
1407 * that exist in the firmware tables, but were not updated by the firmware. For
1408 * example, the DTS could define 'local-bd-address', with zero BD addresses.
1409 */
1410static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1411{
1412 struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1413 bdaddr_t ba;
1414 int ret;
1415
1416 ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1417 (u8 *)&ba, sizeof(ba));
1418 if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1419 return;
1420
1421 bacpy(&hdev->public_addr, &ba);
1422}
1423
/* Bring the controller up: run driver setup, the HCI init stages and
 * power-on bookkeeping. Called with a reference held on @hdev; runs the
 * whole sequence under the request sync lock. Returns 0 on success or a
 * negative error, in which case everything opened here is torn down again.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
		bool invalid_bdaddr;

		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set the quirk to mark the
		 * BD_ADDR invalid before creating the HCI device or in
		 * its setup callback.
		 */
		invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
					  &hdev->quirks);

		if (ret)
			goto setup_failed;

		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
				hci_dev_get_bd_addr_from_property(hdev);

			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
			    hdev->set_bdaddr) {
				ret = hdev->set_bdaddr(hdev,
						       &hdev->public_addr);

				/* If setting of the BD_ADDR from the device
				 * property succeeds, then treat the address
				 * as valid even if the invalid BD_ADDR
				 * quirk indicates otherwise.
				 */
				if (!ret)
					invalid_bdaddr = false;
			}
		}

setup_failed:
		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * For the invalid BD_ADDR quirk it is possible that
		 * it becomes a valid address if the bootloader does
		 * provide it (see above).
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    invalid_bdaddr)
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	msft_do_open(hdev);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}
1628
/* ---- HCI ioctl helpers ---- */

/* HCIDEVUP ioctl entry point: validate the request and hand off to
 * hci_dev_do_open(). Returns 0 on success or a negative error.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1685
d7347f3c
JH
1686/* This function requires the caller holds hdev->lock */
1687static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1688{
1689 struct hci_conn_params *p;
1690
f161dd41
JH
1691 list_for_each_entry(p, &hdev->le_conn_params, list) {
1692 if (p->conn) {
1693 hci_conn_drop(p->conn);
f8aaf9b6 1694 hci_conn_put(p->conn);
f161dd41
JH
1695 p->conn = NULL;
1696 }
d7347f3c 1697 list_del_init(&p->action);
f161dd41 1698 }
d7347f3c
JH
1699
1700 BT_DBG("All LE pending actions cleared");
1701}
1702
/* Power the controller down: stop pending work, flush all state, reset
 * the hardware (unless quirked off) and close the transport. Safe to
 * call when the device is already down (returns 0 early in that case).
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		struct adv_info *adv_instance;

		/* Stop RPA rotation for the device and every adv instance */
		cancel_delayed_work_sync(&hdev->rpa_expired);

		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	msft_do_close(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
		wake_up(&hdev->suspend_wait_q);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1833
1834int hci_dev_close(__u16 dev)
1835{
1836 struct hci_dev *hdev;
1837 int err;
1838
70f23020
AE
1839 hdev = hci_dev_get(dev);
1840 if (!hdev)
1da177e4 1841 return -ENODEV;
8ee56540 1842
d7a5a11d 1843 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1844 err = -EBUSY;
1845 goto done;
1846 }
1847
a69d8927 1848 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
8ee56540
MH
1849 cancel_delayed_work(&hdev->power_off);
1850
1da177e4 1851 err = hci_dev_do_close(hdev);
8ee56540 1852
0736cfa8 1853done:
1da177e4
LT
1854 hci_dev_put(hdev);
1855 return err;
1856}
1857
/* Perform an HCI Reset on an already-up controller: drop all pending
 * traffic and cached state, then issue the reset command synchronously.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset the flow-control counters to their power-on values */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}
1891
5c912495
MH
1892int hci_dev_reset(__u16 dev)
1893{
1894 struct hci_dev *hdev;
1895 int err;
1896
1897 hdev = hci_dev_get(dev);
1898 if (!hdev)
1899 return -ENODEV;
1900
1901 if (!test_bit(HCI_UP, &hdev->flags)) {
1902 err = -ENETDOWN;
1903 goto done;
1904 }
1905
d7a5a11d 1906 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5c912495
MH
1907 err = -EBUSY;
1908 goto done;
1909 }
1910
d7a5a11d 1911 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5c912495
MH
1912 err = -EOPNOTSUPP;
1913 goto done;
1914 }
1915
1916 err = hci_dev_do_reset(hdev);
1917
1918done:
1919 hci_dev_put(hdev);
1920 return err;
1921}
1922
1da177e4
LT
1923int hci_dev_reset_stat(__u16 dev)
1924{
1925 struct hci_dev *hdev;
1926 int ret = 0;
1927
70f23020
AE
1928 hdev = hci_dev_get(dev);
1929 if (!hdev)
1da177e4
LT
1930 return -ENODEV;
1931
d7a5a11d 1932 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1933 ret = -EBUSY;
1934 goto done;
1935 }
1936
d7a5a11d 1937 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1938 ret = -EOPNOTSUPP;
1939 goto done;
1940 }
1941
1da177e4
LT
1942 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1943
0736cfa8 1944done:
1da177e4 1945 hci_dev_put(hdev);
1da177e4
LT
1946 return ret;
1947}
1948
/* Mirror a Write Scan Enable change into the HCI_CONNECTABLE and
 * HCI_DISCOVERABLE flags and, when mgmt is in use, notify userspace of
 * the new settings. The test-and-set/clear helpers report whether the
 * flag actually changed so events are only sent on real transitions.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		/* Limited discoverable implies discoverable, so it is
		 * dropped together with the discoverable flag.
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}
1984
/* Dispatch the legacy HCI device ioctls (HCISETAUTH, HCISETSCAN, ...).
 *
 * Copies the request from userspace, resolves the device and rejects
 * the call for user-channel, unconfigured, non-primary or
 * BR/EDR-disabled controllers before acting. Most commands are
 * translated into synchronous HCI requests; the MTU/packet-type
 * commands update hdev fields directly.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		/* Skip the mgmt notification when nothing changes */
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high half, packet count in
		 * the low half.
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2090
/* HCIGETDEVLIST ioctl: copy the id/flags of up to dev_num registered
 * controllers to userspace. The requested count is clamped to what fits
 * in two pages worth of entries; the returned dev_num reflects how many
 * entries were actually filled in.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2140
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot for one
 * controller and copy it to userspace. For LE-only controllers the ACL
 * fields report the LE buffer parameters and the SCO fields are zeroed.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: bus type; next two bits: device type */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2193
2194/* ---- Interface to HCI drivers ---- */
2195
/* rfkill set_block callback: track the RFKILLED flag and power the
 * controller down when it gets blocked. A device owned by a user
 * channel refuses the state change with -EBUSY. During SETUP/CONFIG
 * the close is skipped; hci_power_on() re-checks RFKILLED later.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}
2216
/* rfkill operations for HCI controllers; only blocking is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2220
/* Work item that powers a controller on.
 *
 * If the device is already up under mgmt with auto-off pending, only
 * the mgmt power-on request sequence is run. Otherwise the device is
 * opened and conditions that were deliberately ignored during setup
 * (rfkill, unconfigured, missing address) are re-checked; if any still
 * holds, the device is turned back off. Finally the SETUP/CONFIG
 * transition is completed and the Index Added event is emitted.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2292
/* Delayed work item that powers a controller off (e.g. auto-off) */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2302
/* Work item run after a Hardware Error event: give the driver a chance
 * to handle the error, then recover by closing and re-opening the
 * device. If the close itself fails, the re-open is skipped.
 */
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}
2319
35f7498a 2320void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2321{
4821002c 2322 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2323
4821002c
JH
2324 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2325 list_del(&uuid->list);
2aeb9a1a
JH
2326 kfree(uuid);
2327 }
2aeb9a1a
JH
2328}
2329
/* Drop all stored BR/EDR link keys.
 *
 * Entries are unlinked with list_del_rcu() and freed via kfree_rcu()
 * so concurrent RCU readers (hci_find_link_key) stay safe; iteration
 * can continue past a deleted entry because list_del_rcu() leaves its
 * forward pointer intact.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
2339
/* Drop all stored SMP long term keys; RCU-safe removal as in
 * hci_link_keys_clear() so lockless readers are not disturbed.
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2349
/* Drop all stored identity resolving keys; RCU-safe removal keeps
 * concurrent IRK lookups valid until the grace period ends.
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2359
/* Drop the list of administratively blocked keys; RCU-safe removal so
 * hci_is_blocked_key() readers are unaffected.
 */
void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b;

	list_for_each_entry(b, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}
2369
2370bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2371{
2372 bool blocked = false;
2373 struct blocked_key *b;
2374
2375 rcu_read_lock();
0c2ac7d4 2376 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
600a8749
AM
2377 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2378 blocked = true;
2379 break;
2380 }
2381 }
2382
2383 rcu_read_unlock();
2384 return blocked;
2385}
2386
/* Look up the stored BR/EDR link key for a remote address.
 *
 * The list is walked under the RCU read lock. A matching key that is
 * on the blocked-key list is rejected (returns NULL) with a
 * rate-limited warning. Note that the blocked-key check and the
 * returned pointer are used after rcu_read_unlock(); callers rely on
 * keys only being freed via kfree_rcu().
 */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2412
/* Decide whether a newly created link key should be stored
 * persistently. The rules are evaluated in order, from the key type
 * itself down to the bonding requirements of the two sides; the first
 * matching rule wins.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2452
e804d25d 2453static u8 ltk_role(u8 type)
98a0b845 2454{
e804d25d
JH
2455 if (type == SMP_LTK)
2456 return HCI_ROLE_MASTER;
98a0b845 2457
e804d25d 2458 return HCI_ROLE_SLAVE;
98a0b845
JH
2459}
2460
/* Look up a long term key by address, address type and role.
 *
 * Secure Connections keys match either role; legacy keys must match
 * the requested role. As with hci_find_link_key(), a blocked key is
 * refused with a rate-limited warning, and the result pointer remains
 * valid after rcu_read_unlock() only because keys are freed via
 * kfree_rcu().
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
75d262c2 2489
/* Resolve a resolvable private address to its identity resolving key.
 *
 * First pass: look for an IRK whose cached RPA matches. Second pass:
 * try to cryptographically resolve the RPA against each stored IRK via
 * smp_irk_matches(), caching the RPA on success. A blocked IRK is
 * discarded with a rate-limited warning. The whole lookup, including
 * the blocked-key check, runs under the RCU read lock.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
2523
/* Look up the identity resolving key stored for an identity address.
 *
 * Random addresses are only accepted when they are static (top two
 * bits 0b11). A blocked IRK is discarded with a rate-limited warning.
 * Runs under the RCU read lock.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:

	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
2556
/* Store (or update) a BR/EDR link key for a remote device.
 *
 * Reuses an existing entry when one exists for the address, otherwise
 * allocates a new one and links it RCU-safely. Optionally reports via
 * *persistent whether the key should be stored permanently (see
 * hci_persistent_key()). Returns the stored key, or NULL on allocation
 * failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2603
/* Store (or update) an SMP long term key for a remote device.
 *
 * An existing entry matching address, address type and the role derived
 * from the key type is overwritten; otherwise a new entry is allocated
 * and linked RCU-safely. Returns the stored key, or NULL on allocation
 * failure.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}
2632
/* Store (or update) an identity resolving key for an identity address.
 *
 * The IRK value and the currently associated RPA are always refreshed;
 * a new entry is allocated and linked RCU-safely when none exists yet.
 * Returns the stored IRK, or NULL on allocation failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
2655
55ed8ca1
JH
2656int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2657{
2658 struct link_key *key;
2659
2660 key = hci_find_link_key(hdev, bdaddr);
2661 if (!key)
2662 return -ENOENT;
2663
6ed93dc6 2664 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 2665
0378b597
JH
2666 list_del_rcu(&key->list);
2667 kfree_rcu(key, rcu);
55ed8ca1
JH
2668
2669 return 0;
2670}
2671
/* Delete all long term keys matching the given address and address
 * type. Returns 0 when at least one key was removed, -ENOENT
 * otherwise. Removal is RCU-safe.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
2690
/* Delete all identity resolving keys matching the given identity
 * address and address type. Removal is RCU-safe.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2705
/* Report whether a device is paired: for BR/EDR addresses this means a
 * stored link key exists; for LE addresses the address is first
 * resolved through any matching IRK to its identity address and then
 * checked against the long term key list.
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* Resolve to the identity address when an IRK is known */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
2741
/* HCI command timer function.
 *
 * Runs when the controller failed to answer a command in time: log the
 * opcode of the outstanding command (if any), give the driver's
 * cmd_timeout hook a chance to recover the transport, then release the
 * command credit and kick the command work queue again.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2763
2763eda6 2764struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 2765 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
2766{
2767 struct oob_data *data;
2768
6928a924
JH
2769 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2770 if (bacmp(bdaddr, &data->bdaddr) != 0)
2771 continue;
2772 if (data->bdaddr_type != bdaddr_type)
2773 continue;
2774 return data;
2775 }
2763eda6
SJ
2776
2777 return NULL;
2778}
2779
6928a924
JH
2780int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2781 u8 bdaddr_type)
2763eda6
SJ
2782{
2783 struct oob_data *data;
2784
6928a924 2785 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6
SJ
2786 if (!data)
2787 return -ENOENT;
2788
6928a924 2789 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2763eda6
SJ
2790
2791 list_del(&data->list);
2792 kfree(data);
2793
2794 return 0;
2795}
2796
35f7498a 2797void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2798{
2799 struct oob_data *data, *n;
2800
2801 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2802 list_del(&data->list);
2803 kfree(data);
2804 }
2763eda6
SJ
2805}
2806
/* Store (or update) out-of-band pairing data received for a remote
 * device.
 *
 * data->present encodes which hashes are valid: 0x01 = P-192 only,
 * 0x02 = P-256 only, 0x03 = both, 0x00 = none. The two if/else blocks
 * below must run in this order since the second one finalizes the
 * 0x01 case. Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2852
d2609b34
FG
2853/* This function requires the caller holds hdev->lock */
2854struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2855{
2856 struct adv_info *adv_instance;
2857
2858 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2859 if (adv_instance->instance == instance)
2860 return adv_instance;
2861 }
2862
2863 return NULL;
2864}
2865
2866/* This function requires the caller holds hdev->lock */
74b93e9f
PK
2867struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2868{
d2609b34
FG
2869 struct adv_info *cur_instance;
2870
2871 cur_instance = hci_find_adv_instance(hdev, instance);
2872 if (!cur_instance)
2873 return NULL;
2874
2875 if (cur_instance == list_last_entry(&hdev->adv_instances,
2876 struct adv_info, list))
2877 return list_first_entry(&hdev->adv_instances,
2878 struct adv_info, list);
2879 else
2880 return list_next_entry(cur_instance, list);
2881}
2882
2883/* This function requires the caller holds hdev->lock */
2884int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2885{
2886 struct adv_info *adv_instance;
2887
2888 adv_instance = hci_find_adv_instance(hdev, instance);
2889 if (!adv_instance)
2890 return -ENOENT;
2891
2892 BT_DBG("%s removing %dMR", hdev->name, instance);
2893
cab054ab
JH
2894 if (hdev->cur_adv_instance == instance) {
2895 if (hdev->adv_instance_timeout) {
2896 cancel_delayed_work(&hdev->adv_instance_expire);
2897 hdev->adv_instance_timeout = 0;
2898 }
2899 hdev->cur_adv_instance = 0x00;
5d900e46
FG
2900 }
2901
a73c046a
JK
2902 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2903
d2609b34
FG
2904 list_del(&adv_instance->list);
2905 kfree(adv_instance);
2906
2907 hdev->adv_instance_cnt--;
2908
2909 return 0;
2910}
2911
a73c046a
JK
2912void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2913{
2914 struct adv_info *adv_instance, *n;
2915
2916 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2917 adv_instance->rpa_expired = rpa_expired;
2918}
2919
/* This function requires the caller holds hdev->lock.
 * Remove and free all advertising instances, cancelling the pending
 * expiry timer and each instance's RPA-expired work beforehand so no
 * callback can touch freed memory.
 */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}
2939
/* Delayed work callback that flags an advertising instance's
 * resolvable private address as expired so it gets regenerated.
 */
static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}
2949
d2609b34
FG
2950/* This function requires the caller holds hdev->lock */
2951int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2952 u16 adv_data_len, u8 *adv_data,
2953 u16 scan_rsp_len, u8 *scan_rsp_data,
2954 u16 timeout, u16 duration)
2955{
2956 struct adv_info *adv_instance;
2957
2958 adv_instance = hci_find_adv_instance(hdev, instance);
2959 if (adv_instance) {
2960 memset(adv_instance->adv_data, 0,
2961 sizeof(adv_instance->adv_data));
2962 memset(adv_instance->scan_rsp_data, 0,
2963 sizeof(adv_instance->scan_rsp_data));
2964 } else {
1d0fac2c 2965 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
d2609b34
FG
2966 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2967 return -EOVERFLOW;
2968
39ecfad6 2969 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
d2609b34
FG
2970 if (!adv_instance)
2971 return -ENOMEM;
2972
fffd38bc 2973 adv_instance->pending = true;
d2609b34
FG
2974 adv_instance->instance = instance;
2975 list_add(&adv_instance->list, &hdev->adv_instances);
2976 hdev->adv_instance_cnt++;
2977 }
2978
2979 adv_instance->flags = flags;
2980 adv_instance->adv_data_len = adv_data_len;
2981 adv_instance->scan_rsp_len = scan_rsp_len;
2982
2983 if (adv_data_len)
2984 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2985
2986 if (scan_rsp_len)
2987 memcpy(adv_instance->scan_rsp_data,
2988 scan_rsp_data, scan_rsp_len);
2989
2990 adv_instance->timeout = timeout;
5d900e46 2991 adv_instance->remaining_time = timeout;
d2609b34
FG
2992
2993 if (duration == 0)
10873f99 2994 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
d2609b34
FG
2995 else
2996 adv_instance->duration = duration;
2997
de181e88
JK
2998 adv_instance->tx_power = HCI_TX_POWER_INVALID;
2999
a73c046a
JK
3000 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3001 adv_instance_rpa_expired);
3002
d2609b34
FG
3003 BT_DBG("%s for %dMR", hdev->name, instance);
3004
3005 return 0;
3006}
3007
e5e1e7fd
MC
3008/* This function requires the caller holds hdev->lock */
3009void hci_adv_monitors_clear(struct hci_dev *hdev)
3010{
b139553d
MC
3011 struct adv_monitor *monitor;
3012 int handle;
3013
3014 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3015 hci_free_adv_monitor(monitor);
3016
e5e1e7fd
MC
3017 idr_destroy(&hdev->adv_monitors_idr);
3018}
3019
b139553d
MC
3020void hci_free_adv_monitor(struct adv_monitor *monitor)
3021{
3022 struct adv_pattern *pattern;
3023 struct adv_pattern *tmp;
3024
3025 if (!monitor)
3026 return;
3027
3028 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list)
3029 kfree(pattern);
3030
3031 kfree(monitor);
3032}
3033
3034/* This function requires the caller holds hdev->lock */
3035int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3036{
3037 int min, max, handle;
3038
3039 if (!monitor)
3040 return -EINVAL;
3041
3042 min = HCI_MIN_ADV_MONITOR_HANDLE;
3043 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3044 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3045 GFP_KERNEL);
3046 if (handle < 0)
3047 return handle;
3048
3049 hdev->adv_monitors_cnt++;
3050 monitor->handle = handle;
8208f5a9
MC
3051
3052 hci_update_background_scan(hdev);
3053
b139553d
MC
3054 return 0;
3055}
3056
bd2fbc6c
MC
3057static int free_adv_monitor(int id, void *ptr, void *data)
3058{
3059 struct hci_dev *hdev = data;
3060 struct adv_monitor *monitor = ptr;
3061
3062 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3063 hci_free_adv_monitor(monitor);
3064
3065 return 0;
3066}
3067
3068/* This function requires the caller holds hdev->lock */
3069int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle)
3070{
3071 struct adv_monitor *monitor;
3072
3073 if (handle) {
3074 monitor = idr_find(&hdev->adv_monitors_idr, handle);
3075 if (!monitor)
3076 return -ENOENT;
3077
3078 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3079 hci_free_adv_monitor(monitor);
3080 } else {
3081 /* Remove all monitors if handle is 0. */
3082 idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev);
3083 }
3084
8208f5a9
MC
3085 hci_update_background_scan(hdev);
3086
bd2fbc6c
MC
3087 return 0;
3088}
3089
8208f5a9
MC
3090/* This function requires the caller holds hdev->lock */
3091bool hci_is_adv_monitoring(struct hci_dev *hdev)
3092{
3093 return !idr_is_empty(&hdev->adv_monitors_idr);
3094}
3095
dcc36c16 3096struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 3097 bdaddr_t *bdaddr, u8 type)
b2a66aad 3098{
8035ded4 3099 struct bdaddr_list *b;
b2a66aad 3100
dcc36c16 3101 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 3102 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3103 return b;
b9ee0a78 3104 }
b2a66aad
AJ
3105
3106 return NULL;
3107}
3108
b950aa88
AN
3109struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3110 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3111 u8 type)
3112{
3113 struct bdaddr_list_with_irk *b;
3114
3115 list_for_each_entry(b, bdaddr_list, list) {
3116 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3117 return b;
3118 }
3119
3120 return NULL;
3121}
3122
8baaa403
APS
3123struct bdaddr_list_with_flags *
3124hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3125 bdaddr_t *bdaddr, u8 type)
3126{
3127 struct bdaddr_list_with_flags *b;
3128
3129 list_for_each_entry(b, bdaddr_list, list) {
3130 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3131 return b;
3132 }
3133
3134 return NULL;
3135}
3136
dcc36c16 3137void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad 3138{
7eb7404f 3139 struct bdaddr_list *b, *n;
b2a66aad 3140
7eb7404f
GT
3141 list_for_each_entry_safe(b, n, bdaddr_list, list) {
3142 list_del(&b->list);
b2a66aad
AJ
3143 kfree(b);
3144 }
b2a66aad
AJ
3145}
3146
dcc36c16 3147int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3148{
3149 struct bdaddr_list *entry;
b2a66aad 3150
b9ee0a78 3151 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3152 return -EBADF;
3153
dcc36c16 3154 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 3155 return -EEXIST;
b2a66aad 3156
27f70f3e 3157 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
3158 if (!entry)
3159 return -ENOMEM;
b2a66aad
AJ
3160
3161 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3162 entry->bdaddr_type = type;
b2a66aad 3163
dcc36c16 3164 list_add(&entry->list, list);
b2a66aad 3165
2a8357f2 3166 return 0;
b2a66aad
AJ
3167}
3168
b950aa88
AN
3169int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3170 u8 type, u8 *peer_irk, u8 *local_irk)
3171{
3172 struct bdaddr_list_with_irk *entry;
3173
3174 if (!bacmp(bdaddr, BDADDR_ANY))
3175 return -EBADF;
3176
3177 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3178 return -EEXIST;
3179
3180 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3181 if (!entry)
3182 return -ENOMEM;
3183
3184 bacpy(&entry->bdaddr, bdaddr);
3185 entry->bdaddr_type = type;
3186
3187 if (peer_irk)
3188 memcpy(entry->peer_irk, peer_irk, 16);
3189
3190 if (local_irk)
3191 memcpy(entry->local_irk, local_irk, 16);
3192
3193 list_add(&entry->list, list);
3194
3195 return 0;
3196}
3197
8baaa403
APS
3198int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3199 u8 type, u32 flags)
3200{
3201 struct bdaddr_list_with_flags *entry;
3202
3203 if (!bacmp(bdaddr, BDADDR_ANY))
3204 return -EBADF;
3205
3206 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3207 return -EEXIST;
3208
3209 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3210 if (!entry)
3211 return -ENOMEM;
3212
3213 bacpy(&entry->bdaddr, bdaddr);
3214 entry->bdaddr_type = type;
3215 entry->current_flags = flags;
3216
3217 list_add(&entry->list, list);
3218
3219 return 0;
3220}
3221
dcc36c16 3222int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3223{
3224 struct bdaddr_list *entry;
b2a66aad 3225
35f7498a 3226 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 3227 hci_bdaddr_list_clear(list);
35f7498a
JH
3228 return 0;
3229 }
b2a66aad 3230
dcc36c16 3231 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
3232 if (!entry)
3233 return -ENOENT;
3234
3235 list_del(&entry->list);
3236 kfree(entry);
3237
3238 return 0;
3239}
3240
b950aa88
AN
3241int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3242 u8 type)
3243{
3244 struct bdaddr_list_with_irk *entry;
3245
3246 if (!bacmp(bdaddr, BDADDR_ANY)) {
3247 hci_bdaddr_list_clear(list);
3248 return 0;
3249 }
3250
3251 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3252 if (!entry)
3253 return -ENOENT;
3254
3255 list_del(&entry->list);
3256 kfree(entry);
3257
3258 return 0;
3259}
3260
8baaa403
APS
3261int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3262 u8 type)
3263{
3264 struct bdaddr_list_with_flags *entry;
3265
3266 if (!bacmp(bdaddr, BDADDR_ANY)) {
3267 hci_bdaddr_list_clear(list);
3268 return 0;
3269 }
3270
3271 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3272 if (!entry)
3273 return -ENOENT;
3274
3275 list_del(&entry->list);
3276 kfree(entry);
3277
3278 return 0;
3279}
3280
15819a70
AG
3281/* This function requires the caller holds hdev->lock */
3282struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3283 bdaddr_t *addr, u8 addr_type)
3284{
3285 struct hci_conn_params *params;
3286
3287 list_for_each_entry(params, &hdev->le_conn_params, list) {
3288 if (bacmp(&params->addr, addr) == 0 &&
3289 params->addr_type == addr_type) {
3290 return params;
3291 }
3292 }
3293
3294 return NULL;
3295}
3296
4b10966f 3297/* This function requires the caller holds hdev->lock */
501f8827
JH
3298struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3299 bdaddr_t *addr, u8 addr_type)
a9b0a04c 3300{
912b42ef 3301 struct hci_conn_params *param;
a9b0a04c 3302
6540351e
MH
3303 switch (addr_type) {
3304 case ADDR_LE_DEV_PUBLIC_RESOLVED:
3305 addr_type = ADDR_LE_DEV_PUBLIC;
3306 break;
3307 case ADDR_LE_DEV_RANDOM_RESOLVED:
3308 addr_type = ADDR_LE_DEV_RANDOM;
3309 break;
3310 }
3311
501f8827 3312 list_for_each_entry(param, list, action) {
912b42ef
JH
3313 if (bacmp(&param->addr, addr) == 0 &&
3314 param->addr_type == addr_type)
3315 return param;
4b10966f
MH
3316 }
3317
3318 return NULL;
a9b0a04c
AG
3319}
3320
15819a70 3321/* This function requires the caller holds hdev->lock */
51d167c0
MH
3322struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3323 bdaddr_t *addr, u8 addr_type)
15819a70
AG
3324{
3325 struct hci_conn_params *params;
3326
3327 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 3328 if (params)
51d167c0 3329 return params;
15819a70
AG
3330
3331 params = kzalloc(sizeof(*params), GFP_KERNEL);
3332 if (!params) {
2064ee33 3333 bt_dev_err(hdev, "out of memory");
51d167c0 3334 return NULL;
15819a70
AG
3335 }
3336
3337 bacpy(&params->addr, addr);
3338 params->addr_type = addr_type;
cef952ce
AG
3339
3340 list_add(&params->list, &hdev->le_conn_params);
93450c75 3341 INIT_LIST_HEAD(&params->action);
cef952ce 3342
bf5b3c8b
MH
3343 params->conn_min_interval = hdev->le_conn_min_interval;
3344 params->conn_max_interval = hdev->le_conn_max_interval;
3345 params->conn_latency = hdev->le_conn_latency;
3346 params->supervision_timeout = hdev->le_supv_timeout;
3347 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3348
3349 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3350
51d167c0 3351 return params;
bf5b3c8b
MH
3352}
3353
f6c63249 3354static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 3355{
f8aaf9b6 3356 if (params->conn) {
f161dd41 3357 hci_conn_drop(params->conn);
f8aaf9b6
JH
3358 hci_conn_put(params->conn);
3359 }
f161dd41 3360
95305baa 3361 list_del(&params->action);
15819a70
AG
3362 list_del(&params->list);
3363 kfree(params);
f6c63249
JH
3364}
3365
3366/* This function requires the caller holds hdev->lock */
3367void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3368{
3369 struct hci_conn_params *params;
3370
3371 params = hci_conn_params_lookup(hdev, addr, addr_type);
3372 if (!params)
3373 return;
3374
3375 hci_conn_params_free(params);
15819a70 3376
95305baa
JH
3377 hci_update_background_scan(hdev);
3378
15819a70
AG
3379 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3380}
3381
3382/* This function requires the caller holds hdev->lock */
55af49a8 3383void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
3384{
3385 struct hci_conn_params *params, *tmp;
3386
3387 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
3388 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3389 continue;
f75113a2
JP
3390
3391 /* If trying to estabilish one time connection to disabled
3392 * device, leave the params, but mark them as just once.
3393 */
3394 if (params->explicit_connect) {
3395 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3396 continue;
3397 }
3398
15819a70
AG
3399 list_del(&params->list);
3400 kfree(params);
3401 }
3402
55af49a8 3403 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
3404}
3405
3406/* This function requires the caller holds hdev->lock */
030e7f81 3407static void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 3408{
15819a70 3409 struct hci_conn_params *params, *tmp;
77a77a30 3410
f6c63249
JH
3411 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3412 hci_conn_params_free(params);
77a77a30 3413
15819a70 3414 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
3415}
3416
a1f4c318
JH
3417/* Copy the Identity Address of the controller.
3418 *
3419 * If the controller has a public BD_ADDR, then by default use that one.
3420 * If this is a LE only controller without a public address, default to
3421 * the static random address.
3422 *
3423 * For debugging purposes it is possible to force controllers with a
3424 * public address to use the static random address instead.
50b5b952
MH
3425 *
3426 * In case BR/EDR has been disabled on a dual-mode controller and
3427 * userspace has configured a static address, then that address
3428 * becomes the identity address instead of the public BR/EDR address.
a1f4c318
JH
3429 */
3430void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3431 u8 *bdaddr_type)
3432{
b7cb93e5 3433 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
50b5b952 3434 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
d7a5a11d 3435 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
50b5b952 3436 bacmp(&hdev->static_addr, BDADDR_ANY))) {
a1f4c318
JH
3437 bacpy(bdaddr, &hdev->static_addr);
3438 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3439 } else {
3440 bacpy(bdaddr, &hdev->bdaddr);
3441 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3442 }
3443}
3444
0e995280
APS
3445static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3446{
3447 int i;
3448
3449 for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3450 clear_bit(i, hdev->suspend_tasks);
3451
3452 wake_up(&hdev->suspend_wait_q);
3453}
3454
9952d90e
APS
3455static int hci_suspend_wait_event(struct hci_dev *hdev)
3456{
3457#define WAKE_COND \
3458 (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
3459 __SUSPEND_NUM_TASKS)
3460
3461 int i;
3462 int ret = wait_event_timeout(hdev->suspend_wait_q,
3463 WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3464
3465 if (ret == 0) {
a9ec8423 3466 bt_dev_err(hdev, "Timed out waiting for suspend events");
9952d90e
APS
3467 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3468 if (test_bit(i, hdev->suspend_tasks))
a9ec8423 3469 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
9952d90e
APS
3470 clear_bit(i, hdev->suspend_tasks);
3471 }
3472
3473 ret = -ETIMEDOUT;
3474 } else {
3475 ret = 0;
3476 }
3477
3478 return ret;
3479}
3480
3481static void hci_prepare_suspend(struct work_struct *work)
3482{
3483 struct hci_dev *hdev =
3484 container_of(work, struct hci_dev, suspend_prepare);
3485
3486 hci_dev_lock(hdev);
3487 hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3488 hci_dev_unlock(hdev);
3489}
3490
8731840a
APS
3491static int hci_change_suspend_state(struct hci_dev *hdev,
3492 enum suspended_state next)
3493{
3494 hdev->suspend_state_next = next;
3495 set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3496 queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3497 return hci_suspend_wait_event(hdev);
3498}
3499
9952d90e
APS
3500static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3501 void *data)
3502{
3503 struct hci_dev *hdev =
3504 container_of(nb, struct hci_dev, suspend_notifier);
3505 int ret = 0;
3506
3507 /* If powering down, wait for completion. */
3508 if (mgmt_powering_down(hdev)) {
3509 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3510 ret = hci_suspend_wait_event(hdev);
3511 if (ret)
3512 goto done;
3513 }
3514
3515 /* Suspend notifier should only act on events when powered. */
3516 if (!hdev_is_powered(hdev))
3517 goto done;
3518
3519 if (action == PM_SUSPEND_PREPARE) {
4f40afc6
APS
3520 /* Suspend consists of two actions:
3521 * - First, disconnect everything and make the controller not
3522 * connectable (disabling scanning)
3523 * - Second, program event filter/whitelist and enable scan
3524 */
8731840a 3525 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
4f40afc6 3526
81dafad5
APS
3527 /* Only configure whitelist if disconnect succeeded and wake
3528 * isn't being prevented.
3529 */
3530 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev)))
8731840a 3531 ret = hci_change_suspend_state(hdev,
0d2c9825 3532 BT_SUSPEND_CONFIGURE_WAKE);
9952d90e 3533 } else if (action == PM_POST_SUSPEND) {
8731840a 3534 ret = hci_change_suspend_state(hdev, BT_RUNNING);
9952d90e
APS
3535 }
3536
3537done:
a9ec8423
APS
3538 /* We always allow suspend even if suspend preparation failed and
3539 * attempt to recover in resume.
3540 */
3541 if (ret)
3542 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3543 action, ret);
3544
24b06572 3545 return NOTIFY_DONE;
9952d90e 3546}
8731840a 3547
9be0dab7
DH
3548/* Alloc HCI device */
3549struct hci_dev *hci_alloc_dev(void)
3550{
3551 struct hci_dev *hdev;
3552
27f70f3e 3553 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
3554 if (!hdev)
3555 return NULL;
3556
b1b813d4
DH
3557 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3558 hdev->esco_type = (ESCO_HV1);
3559 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3560 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3561 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3562 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3563 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3564 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
d2609b34
FG
3565 hdev->adv_instance_cnt = 0;
3566 hdev->cur_adv_instance = 0x00;
5d900e46 3567 hdev->adv_instance_timeout = 0;
b1b813d4 3568
b1b813d4
DH
3569 hdev->sniff_max_interval = 800;
3570 hdev->sniff_min_interval = 80;
3571
3f959d46 3572 hdev->le_adv_channel_map = 0x07;
628531c9
GL
3573 hdev->le_adv_min_interval = 0x0800;
3574 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
3575 hdev->le_scan_interval = 0x0060;
3576 hdev->le_scan_window = 0x0030;
10873f99
AM
3577 hdev->le_scan_int_suspend = 0x0400;
3578 hdev->le_scan_window_suspend = 0x0012;
3579 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3580 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3581 hdev->le_scan_int_connect = 0x0060;
3582 hdev->le_scan_window_connect = 0x0060;
b48c3b59
JH
3583 hdev->le_conn_min_interval = 0x0018;
3584 hdev->le_conn_max_interval = 0x0028;
04fb7d90
MH
3585 hdev->le_conn_latency = 0x0000;
3586 hdev->le_supv_timeout = 0x002a;
a8e1bfaa
MH
3587 hdev->le_def_tx_len = 0x001b;
3588 hdev->le_def_tx_time = 0x0148;
3589 hdev->le_max_tx_len = 0x001b;
3590 hdev->le_max_tx_time = 0x0148;
3591 hdev->le_max_rx_len = 0x001b;
3592 hdev->le_max_rx_time = 0x0148;
30d65e08
MK
3593 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3594 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
6decb5b4
JK
3595 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3596 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
1d0fac2c 3597 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
10873f99 3598 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
49b020c1 3599 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
bef64738 3600
d6bfd59c 3601 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3602 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3603 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3604 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
302975cb 3605 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
58a96fc3 3606 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
d6bfd59c 3607
10873f99
AM
3608 /* default 1.28 sec page scan */
3609 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3610 hdev->def_page_scan_int = 0x0800;
3611 hdev->def_page_scan_window = 0x0012;
3612
b1b813d4
DH
3613 mutex_init(&hdev->lock);
3614 mutex_init(&hdev->req_lock);
3615
3616 INIT_LIST_HEAD(&hdev->mgmt_pending);
3617 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 3618 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
3619 INIT_LIST_HEAD(&hdev->uuids);
3620 INIT_LIST_HEAD(&hdev->link_keys);
3621 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3622 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3623 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3624 INIT_LIST_HEAD(&hdev->le_white_list);
cfdb0c2d 3625 INIT_LIST_HEAD(&hdev->le_resolv_list);
15819a70 3626 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3627 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3628 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3629 INIT_LIST_HEAD(&hdev->conn_hash.list);
d2609b34 3630 INIT_LIST_HEAD(&hdev->adv_instances);
600a8749 3631 INIT_LIST_HEAD(&hdev->blocked_keys);
b1b813d4
DH
3632
3633 INIT_WORK(&hdev->rx_work, hci_rx_work);
3634 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3635 INIT_WORK(&hdev->tx_work, hci_tx_work);
3636 INIT_WORK(&hdev->power_on, hci_power_on);
c7741d16 3637 INIT_WORK(&hdev->error_reset, hci_error_reset);
9952d90e 3638 INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
b1b813d4 3639
b1b813d4 3640 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
b1b813d4 3641
b1b813d4
DH
3642 skb_queue_head_init(&hdev->rx_q);
3643 skb_queue_head_init(&hdev->cmd_q);
3644 skb_queue_head_init(&hdev->raw_q);
3645
3646 init_waitqueue_head(&hdev->req_wait_q);
9952d90e 3647 init_waitqueue_head(&hdev->suspend_wait_q);
b1b813d4 3648
65cc2b49 3649 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3650
5fc16cc4
JH
3651 hci_request_setup(hdev);
3652
b1b813d4
DH
3653 hci_init_sysfs(hdev);
3654 discovery_init(hdev);
9be0dab7
DH
3655
3656 return hdev;
3657}
3658EXPORT_SYMBOL(hci_alloc_dev);
3659
3660/* Free HCI device */
3661void hci_free_dev(struct hci_dev *hdev)
3662{
9be0dab7
DH
3663 /* will free via device release */
3664 put_device(&hdev->dev);
3665}
3666EXPORT_SYMBOL(hci_free_dev);
3667
1da177e4
LT
3668/* Register HCI device */
3669int hci_register_dev(struct hci_dev *hdev)
3670{
b1b813d4 3671 int id, error;
1da177e4 3672
74292d5a 3673 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
3674 return -EINVAL;
3675
08add513
MM
3676 /* Do not allow HCI_AMP devices to register at index 0,
3677 * so the index can be used as the AMP controller ID.
3678 */
3df92b31 3679 switch (hdev->dev_type) {
ca8bee5d 3680 case HCI_PRIMARY:
3df92b31
SL
3681 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3682 break;
3683 case HCI_AMP:
3684 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3685 break;
3686 default:
3687 return -EINVAL;
1da177e4 3688 }
8e87d142 3689
3df92b31
SL
3690 if (id < 0)
3691 return id;
3692
1da177e4
LT
3693 sprintf(hdev->name, "hci%d", id);
3694 hdev->id = id;
2d8b3a11
AE
3695
3696 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3697
29e2dd0d 3698 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
33ca954d
DH
3699 if (!hdev->workqueue) {
3700 error = -ENOMEM;
3701 goto err;
3702 }
f48fd9c8 3703
29e2dd0d
TH
3704 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3705 hdev->name);
6ead1bbc
JH
3706 if (!hdev->req_workqueue) {
3707 destroy_workqueue(hdev->workqueue);
3708 error = -ENOMEM;
3709 goto err;
3710 }
3711
0153e2ec
MH
3712 if (!IS_ERR_OR_NULL(bt_debugfs))
3713 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3714
bdc3e0f1
MH
3715 dev_set_name(&hdev->dev, "%s", hdev->name);
3716
3717 error = device_add(&hdev->dev);
33ca954d 3718 if (error < 0)
54506918 3719 goto err_wqueue;
1da177e4 3720
6d5d2ee6
HK
3721 hci_leds_init(hdev);
3722
611b30f7 3723 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3724 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3725 hdev);
611b30f7
MH
3726 if (hdev->rfkill) {
3727 if (rfkill_register(hdev->rfkill) < 0) {
3728 rfkill_destroy(hdev->rfkill);
3729 hdev->rfkill = NULL;
3730 }
3731 }
3732
5e130367 3733 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
a1536da2 3734 hci_dev_set_flag(hdev, HCI_RFKILLED);
5e130367 3735
a1536da2
MH
3736 hci_dev_set_flag(hdev, HCI_SETUP);
3737 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
ce2be9ac 3738
ca8bee5d 3739 if (hdev->dev_type == HCI_PRIMARY) {
56f87901
JH
3740 /* Assume BR/EDR support until proven otherwise (such as
3741 * through reading supported features during init.
3742 */
a1536da2 3743 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
56f87901 3744 }
ce2be9ac 3745
fcee3377
GP
3746 write_lock(&hci_dev_list_lock);
3747 list_add(&hdev->list, &hci_dev_list);
3748 write_unlock(&hci_dev_list_lock);
3749
4a964404
MH
3750 /* Devices that are marked for raw-only usage are unconfigured
3751 * and should not be included in normal operation.
fee746b0
MH
3752 */
3753 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
a1536da2 3754 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
fee746b0 3755
05fcd4c4 3756 hci_sock_dev_event(hdev, HCI_DEV_REG);
dc946bd8 3757 hci_dev_hold(hdev);
1da177e4 3758
9952d90e
APS
3759 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3760 error = register_pm_notifier(&hdev->suspend_notifier);
3761 if (error)
3762 goto err_wqueue;
3763
19202573 3764 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3765
e5e1e7fd
MC
3766 idr_init(&hdev->adv_monitors_idr);
3767
1da177e4 3768 return id;
f48fd9c8 3769
33ca954d
DH
3770err_wqueue:
3771 destroy_workqueue(hdev->workqueue);
6ead1bbc 3772 destroy_workqueue(hdev->req_workqueue);
33ca954d 3773err:
3df92b31 3774 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3775
33ca954d 3776 return error;
1da177e4
LT
3777}
3778EXPORT_SYMBOL(hci_register_dev);
3779
3780/* Unregister HCI device */
59735631 3781void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3782{
2d7cc19e 3783 int id;
ef222013 3784
c13854ce 3785 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3786
a1536da2 3787 hci_dev_set_flag(hdev, HCI_UNREGISTER);
94324962 3788
3df92b31
SL
3789 id = hdev->id;
3790
f20d09d5 3791 write_lock(&hci_dev_list_lock);
1da177e4 3792 list_del(&hdev->list);
f20d09d5 3793 write_unlock(&hci_dev_list_lock);
1da177e4 3794
b9b5ef18
GP
3795 cancel_work_sync(&hdev->power_on);
3796
0e995280 3797 hci_suspend_clear_tasks(hdev);
3eec158d 3798 unregister_pm_notifier(&hdev->suspend_notifier);
4e8c36c3
APS
3799 cancel_work_sync(&hdev->suspend_prepare);
3800
3801 hci_dev_do_close(hdev);
9952d90e 3802
ab81cbf9 3803 if (!test_bit(HCI_INIT, &hdev->flags) &&
d7a5a11d
MH
3804 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3805 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
09fd0de5 3806 hci_dev_lock(hdev);
744cf19e 3807 mgmt_index_removed(hdev);
09fd0de5 3808 hci_dev_unlock(hdev);
56e5cb86 3809 }
ab81cbf9 3810
2e58ef3e
JH
3811 /* mgmt_index_removed should take care of emptying the
3812 * pending list */
3813 BUG_ON(!list_empty(&hdev->mgmt_pending));
3814
05fcd4c4 3815 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
1da177e4 3816
611b30f7
MH
3817 if (hdev->rfkill) {
3818 rfkill_unregister(hdev->rfkill);
3819 rfkill_destroy(hdev->rfkill);
3820 }
3821
bdc3e0f1 3822 device_del(&hdev->dev);
147e2d59 3823
0153e2ec 3824 debugfs_remove_recursive(hdev->debugfs);
5177a838
MH
3825 kfree_const(hdev->hw_info);
3826 kfree_const(hdev->fw_info);
0153e2ec 3827
f48fd9c8 3828 destroy_workqueue(hdev->workqueue);
6ead1bbc 3829 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3830
09fd0de5 3831 hci_dev_lock(hdev);
dcc36c16 3832 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 3833 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 3834 hci_uuids_clear(hdev);
55ed8ca1 3835 hci_link_keys_clear(hdev);
b899efaf 3836 hci_smp_ltks_clear(hdev);
970c4e46 3837 hci_smp_irks_clear(hdev);
2763eda6 3838 hci_remote_oob_data_clear(hdev);
d2609b34 3839 hci_adv_instances_clear(hdev);
e5e1e7fd 3840 hci_adv_monitors_clear(hdev);
dcc36c16 3841 hci_bdaddr_list_clear(&hdev->le_white_list);
cfdb0c2d 3842 hci_bdaddr_list_clear(&hdev->le_resolv_list);
373110c5 3843 hci_conn_params_clear_all(hdev);
22078800 3844 hci_discovery_filter_clear(hdev);
600a8749 3845 hci_blocked_keys_clear(hdev);
09fd0de5 3846 hci_dev_unlock(hdev);
e2e0cacb 3847
dc946bd8 3848 hci_dev_put(hdev);
3df92b31
SL
3849
3850 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3851}
3852EXPORT_SYMBOL(hci_unregister_dev);
3853
3854/* Suspend HCI device */
3855int hci_suspend_dev(struct hci_dev *hdev)
3856{
05fcd4c4 3857 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
1da177e4
LT
3858 return 0;
3859}
3860EXPORT_SYMBOL(hci_suspend_dev);
3861
3862/* Resume HCI device */
3863int hci_resume_dev(struct hci_dev *hdev)
3864{
05fcd4c4 3865 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
1da177e4
LT
3866 return 0;
3867}
3868EXPORT_SYMBOL(hci_resume_dev);
3869
75e0569f
MH
3870/* Reset HCI device */
3871int hci_reset_dev(struct hci_dev *hdev)
3872{
1e4b6e91 3873 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
75e0569f
MH
3874 struct sk_buff *skb;
3875
3876 skb = bt_skb_alloc(3, GFP_ATOMIC);
3877 if (!skb)
3878 return -ENOMEM;
3879
d79f34e3 3880 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
59ae1d12 3881 skb_put_data(skb, hw_err, 3);
75e0569f
MH
3882
3883 /* Send Hardware Error to upper stack */
3884 return hci_recv_frame(hdev, skb);
3885}
3886EXPORT_SYMBOL(hci_reset_dev);
3887
76bca880 3888/* Receive frame from HCI drivers */
e1a26170 3889int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3890{
76bca880 3891 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3892 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3893 kfree_skb(skb);
3894 return -ENXIO;
3895 }
3896
d79f34e3
MH
3897 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3898 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
cc974003
MH
3899 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
3900 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
fe806dce
MH
3901 kfree_skb(skb);
3902 return -EINVAL;
3903 }
3904
d82603c6 3905 /* Incoming skb */
76bca880
MH
3906 bt_cb(skb)->incoming = 1;
3907
3908 /* Time stamp */
3909 __net_timestamp(skb);
3910
76bca880 3911 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3912 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3913
76bca880
MH
3914 return 0;
3915}
3916EXPORT_SYMBOL(hci_recv_frame);
3917
e875ff84
MH
3918/* Receive diagnostic message from HCI drivers */
3919int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3920{
581d6fd6 3921 /* Mark as diagnostic packet */
d79f34e3 3922 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
581d6fd6 3923
e875ff84
MH
3924 /* Time stamp */
3925 __net_timestamp(skb);
3926
581d6fd6
MH
3927 skb_queue_tail(&hdev->rx_q, skb);
3928 queue_work(hdev->workqueue, &hdev->rx_work);
e875ff84 3929
e875ff84
MH
3930 return 0;
3931}
3932EXPORT_SYMBOL(hci_recv_diag);
3933
5177a838
MH
3934void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3935{
3936 va_list vargs;
3937
3938 va_start(vargs, fmt);
3939 kfree_const(hdev->hw_info);
3940 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3941 va_end(vargs);
3942}
3943EXPORT_SYMBOL(hci_set_hw_info);
3944
3945void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3946{
3947 va_list vargs;
3948
3949 va_start(vargs, fmt);
3950 kfree_const(hdev->fw_info);
3951 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3952 va_end(vargs);
3953}
3954EXPORT_SYMBOL(hci_set_fw_info);
3955
1da177e4
LT
3956/* ---- Interface to upper protocols ---- */
3957
1da177e4
LT
3958int hci_register_cb(struct hci_cb *cb)
3959{
3960 BT_DBG("%p name %s", cb, cb->name);
3961
fba7ecf0 3962 mutex_lock(&hci_cb_list_lock);
00629e0f 3963 list_add_tail(&cb->list, &hci_cb_list);
fba7ecf0 3964 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3965
3966 return 0;
3967}
3968EXPORT_SYMBOL(hci_register_cb);
3969
3970int hci_unregister_cb(struct hci_cb *cb)
3971{
3972 BT_DBG("%p name %s", cb, cb->name);
3973
fba7ecf0 3974 mutex_lock(&hci_cb_list_lock);
1da177e4 3975 list_del(&cb->list);
fba7ecf0 3976 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3977
3978 return 0;
3979}
3980EXPORT_SYMBOL(hci_unregister_cb);
3981
/* Hand one fully-built packet to the driver.
 *
 * Timestamps the skb, mirrors it to the monitor socket (and to raw
 * sockets when promiscuous mode is on), drops it when the device is
 * not HCI_RUNNING, otherwise passes it to hdev->send().  Ownership of
 * @skb always ends here: on driver error it is freed in this function.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Device must be up and running to accept traffic. */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
	}
}
4014
/* Send HCI command */

/* Build an HCI command packet and queue it on hdev->cmd_q for
 * hci_cmd_work() to transmit.
 *
 * Returns 0 on success or -ENOMEM if the packet could not be
 * allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 4039
/* Send a vendor-specific HCI command without waiting for any event.
 *
 * Only OGF 0x3f (vendor) commands are accepted; everything else must
 * go through hci_send_cmd()/hci_cmd_sync() so completion events are
 * tracked.  The frame is sent directly via hci_send_frame(), bypassing
 * the command queue and credit accounting.
 *
 * Returns 0 on success, -EINVAL for non-vendor opcodes, -ENOMEM on
 * allocation failure.
 */
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
4070
/* Get data from the previously sent command */

/* Return a pointer to the parameter bytes of the last command sent, or
 * NULL when nothing was sent or the sent opcode does not match
 * @opcode.  The pointer aliases hdev->sent_cmd and is only valid until
 * the next command replaces it.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
4088
/* Send HCI command and wait for command complete event */

/* Synchronous command helper for external callers: takes the request
 * lock around __hci_cmd_sync() so concurrent requests serialize.
 *
 * Returns the response skb on success, ERR_PTR(-ENETDOWN) when the
 * device is not up, or whatever error __hci_cmd_sync() produced.
 */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
4107
/* Send ACL data */

/* Prepend the 4-byte ACL header (handle+flags, little-endian payload
 * length) to @skb.  The length field covers the skb contents before
 * the push.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
4120
/* Tag @skb (and any frag_list fragments) as ACL data, add ACL headers
 * and append everything to @queue.
 *
 * For HCI_PRIMARY the connection handle is used; for HCI_AMP the
 * channel handle.  Fragments after the first are re-flagged ACL_CONT
 * and all fragments are queued atomically under the queue lock.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT instead of ACL_START. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
4182
/* Queue ACL data on the channel's data queue and kick the TX worker.
 * Actual transmission happens later from hci_tx_work().
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Send SCO data */

/* Prepend the SCO header, tag the packet type and queue it on the
 * connection's data queue; hci_tx_work() sends it later.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
4214
4215/* ---- HCI TX task (outgoing data) ---- */
4216
4217/* HCI Connection scheduler */
6039aa73
GP
4218static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4219 int *quote)
1da177e4
LT
4220{
4221 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4222 struct hci_conn *conn = NULL, *c;
abc5de8f 4223 unsigned int num = 0, min = ~0;
1da177e4 4224
8e87d142 4225 /* We don't have to lock device here. Connections are always
1da177e4 4226 * added and removed with TX task disabled. */
bf4c6325
GP
4227
4228 rcu_read_lock();
4229
4230 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4231 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4232 continue;
769be974
MH
4233
4234 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4235 continue;
4236
1da177e4
LT
4237 num++;
4238
4239 if (c->sent < min) {
4240 min = c->sent;
4241 conn = c;
4242 }
52087a79
LAD
4243
4244 if (hci_conn_num(hdev, type) == num)
4245 break;
1da177e4
LT
4246 }
4247
bf4c6325
GP
4248 rcu_read_unlock();
4249
1da177e4 4250 if (conn) {
6ed58ec5
VT
4251 int cnt, q;
4252
4253 switch (conn->type) {
4254 case ACL_LINK:
4255 cnt = hdev->acl_cnt;
4256 break;
4257 case SCO_LINK:
4258 case ESCO_LINK:
4259 cnt = hdev->sco_cnt;
4260 break;
4261 case LE_LINK:
4262 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4263 break;
4264 default:
4265 cnt = 0;
2064ee33 4266 bt_dev_err(hdev, "unknown link type %d", conn->type);
6ed58ec5
VT
4267 }
4268
4269 q = cnt / num;
1da177e4
LT
4270 *quote = q ? q : 1;
4271 } else
4272 *quote = 0;
4273
4274 BT_DBG("conn %p quote %d", conn, *quote);
4275 return conn;
4276}
4277
/* TX timeout handler: disconnect every connection of @type that still
 * has unacknowledged packets (c->sent != 0), on the assumption that
 * the link has stalled.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
4298
/* Channel-level scheduler: among all channels of connections of @type,
 * pick one whose head skb has the highest priority, breaking ties by
 * the fewest packets outstanding on the owning connection.  *@quote is
 * the credit share (credits / contenders, minimum 1).  Returns NULL
 * when no channel has data queued.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart fairness bookkeeping
			 * at this priority level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4380
/* Anti-starvation pass run after a scheduling round: for every channel
 * of @type that sent nothing this round (chan->sent == 0) but still
 * has data queued, promote its head skb to priority HCI_PRIO_MAX - 1
 * so it wins the next round.  Channels that did send get their
 * per-round counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4430
b71d385a
AE
4431static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4432{
4433 /* Calculate count of blocks used by this packet */
4434 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4435}
4436
/* Kill stalled ACL connections when the controller has handed back no
 * credits (@cnt == 0) for longer than HCI_ACL_TX_TIMEOUT since the
 * last ACL transmit.  Skipped entirely for unconfigured controllers.
 *
 * NOTE(review): this is also invoked from hci_sched_le() with the LE
 * credit count, yet it unconditionally checks acl_last_tx and tears
 * down ACL_LINK connections — verify against newer upstream, which
 * passes the link type into this helper.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 4447
/* Schedule SCO */

/* Drain queued SCO packets while sco_cnt credits remain, round-robin
 * via hci_low_sent().  conn->sent wraps to 0 at ~0 rather than
 * overflowing.
 */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4471
4472static void hci_sched_esco(struct hci_dev *hdev)
4473{
4474 struct hci_conn *conn;
4475 struct sk_buff *skb;
4476 int quote;
4477
4478 BT_DBG("%s", hdev->name);
4479
4480 if (!hci_conn_num(hdev, ESCO_LINK))
4481 return;
4482
4483 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4484 &quote))) {
4485 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4486 BT_DBG("skb %p len %d", skb, skb->len);
4487 hci_send_frame(hdev, skb);
4488
4489 conn->sent++;
4490 if (conn->sent == ~0)
4491 conn->sent = 0;
4492 }
4493 }
4494}
4495
/* Packet-based ACL scheduler: send up to acl_cnt packets, channel by
 * channel via hci_chan_sent(), stopping a channel early when a
 * lower-priority skb reaches the queue head.  SCO/eSCO packets are
 * interleaved after each ACL frame to keep audio latency low, and a
 * priority recalculation runs when anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4537
/* Block-based ACL scheduler (AMP-style flow control): credits are data
 * blocks rather than packets, so each frame consumes __get_blocks()
 * credits and the quota is decremented by the same amount.
 *
 * NOTE(review): when a dequeued packet needs more blocks than are
 * currently available, the function returns with the skb neither sent,
 * freed, nor requeued — looks like a leak/drop; confirm intent.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4591
/* Dispatch ACL scheduling to the packet-based or block-based variant
 * according to the controller's flow-control mode, bailing out early
 * when there is no link of the type this controller carries.
 */
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
4614
/* LE scheduler: mirrors hci_sched_acl_pkt() but draws credits from the
 * LE pool (le_cnt), or the shared ACL pool when the controller has no
 * dedicated LE buffers (le_pkts == 0).  The consumed count is written
 * back to the pool it came from.
 *
 * NOTE(review): __check_timeout() is called with the LE credit count
 * but internally checks acl_last_tx/ACL_LINK — see the note on that
 * helper.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4664
/* Workqueue handler that drains all TX queues: runs the SCO, eSCO,
 * ACL and LE schedulers (skipped while the device is held by a user
 * channel), then flushes raw packets queued on hdev->raw_q.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4685
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */

/* Deliver one inbound ACL packet: strip the ACL header, look up the
 * connection by handle and pass the payload to L2CAP.  The skb is
 * consumed either by l2cap_recv_acldata() or freed here when the
 * handle is unknown.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}
4723
/* SCO data packet */

/* Deliver one inbound SCO packet: strip the SCO header, look up the
 * connection, stash the packet-status bits (low two flag bits) in the
 * skb control block and hand the payload to the SCO layer.  Unknown
 * handles free the skb here.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		bt_cb(skb)->sco.pkt_status = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}
4758
/* A request is complete when the command queue is empty or the next
 * queued command starts a new request (carries HCI_REQ_START).
 */
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}
4769
/* Re-queue a clone of the last sent command at the head of cmd_q.
 * Used to recover from spurious reset-complete events; HCI_OP_RESET
 * itself is never resent.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
4791
/* Resolve which completion callback should fire for a finished HCI
 * command with the given @opcode and @status.
 *
 * Writes at most one of *@req_complete / *@req_complete_skb (taken
 * from the sent command, or from a queued command when the request
 * aborted mid-way) and, on an aborted request, removes its remaining
 * queued commands.  Nothing is written when the request is not yet
 * complete or the opcode does not match the last sent command.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			/* Start of the next request: put it back and stop. */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4856
/* Workqueue handler for inbound traffic: drains hdev->rx_q, mirroring
 * each packet to the monitor (and raw sockets in promiscuous mode),
 * filtering by device state, then dispatching by packet type to the
 * event/ACL/SCO handlers.  Unknown types are dropped.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * the userspace has exclusive access to device.
		 * When device is HCI_INIT, we still need to process
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
4919
/* Workqueue handler that transmits one command from cmd_q when a
 * command credit is available.
 *
 * The sent skb is kept as a clone in hdev->sent_cmd so the response
 * path can match it; HCI_CMD_PENDING is set when a synchronous request
 * is waiting.  The command timeout timer is (re)armed per command,
 * except during HCI_RESET where it is cancelled.  When cloning fails
 * the command is put back and the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}