Bluetooth: Add definitions for advertisement monitor features
[linux-2.6-block.git] / net / bluetooth / hci_core.c
CommitLineData
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
99780a7b 32#include <linux/crypto.h>
7a0e5b15 33#include <linux/property.h>
9952d90e
APS
34#include <linux/suspend.h>
35#include <linux/wait.h>
47219839 36#include <asm/unaligned.h>
1da177e4
LT
37
38#include <net/bluetooth/bluetooth.h>
39#include <net/bluetooth/hci_core.h>
4bc58f51 40#include <net/bluetooth/l2cap.h>
af58925c 41#include <net/bluetooth/mgmt.h>
1da177e4 42
0857dd3b 43#include "hci_request.h"
60c5f5fb 44#include "hci_debugfs.h"
970c4e46 45#include "smp.h"
6d5d2ee6 46#include "leds.h"
145373cb 47#include "msft.h"
970c4e46 48
/* Work handlers for RX, command and TX processing; defined later in
 * this file, declared here so they can be referenced during setup.
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
63
baf27f6e
MH
64/* ---- HCI debugfs entries ---- */
65
4b4148e9
MH
/* debugfs read handler for "dut_mode": reports 'Y' or 'N' depending on
 * whether the HCI_DUT_MODE flag is currently set on the controller.
 */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
77
/* debugfs write handler for "dut_mode": parses a boolean from user space
 * and toggles Device Under Test mode. Enabling sends HCI_Enable_DUT_Mode;
 * disabling requires a full HCI_Reset since there is no "disable" command.
 * The synchronous command is issued under the request sync lock.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	bool enable;
	int err;

	/* DUT mode can only be toggled while the controller is up */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* Only the command status matters; drop the response skb */
	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}
114
/* File operations for the "dut_mode" debugfs entry */
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
121
4b4113d6
MH
/* debugfs read handler for "vendor_diag": reports 'Y' or 'N' depending on
 * whether vendor diagnostic mode (HCI_VENDOR_DIAG) is enabled.
 */
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
133
/* debugfs write handler for "vendor_diag": parses a boolean and calls the
 * driver's set_diag() callback under the request sync lock, then mirrors
 * the result in the HCI_VENDOR_DIAG flag. When the diagnostic setting is
 * non-persistent and the transport is down (or in user channel mode), the
 * callback is skipped and only the flag is recorded; the setting is then
 * programmed when the controller gets powered on.
 */
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active or in user channel operation, then there is no need
	 * for the vendor callback. Instead just store the desired value and
	 * the setting will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}
170
/* File operations for the "vendor_diag" debugfs entry */
static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};
177
f640ee98
MH
/* Create the basic debugfs entries that are valid for every controller:
 * "dut_mode" always, and "vendor_diag" only when the driver provides a
 * set_diag() callback.
 */
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}
187
/* Request builder: queue an HCI_Reset command. HCI_RESET is set in the
 * device flags first so the event processing knows a reset is in flight.
 */
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
	return 0;
}
197
/* Stage-1 init for BR/EDR (HCI_PRIMARY) controllers: select packet-based
 * flow control and read the basic controller identity information.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
211
/* Stage-1 init for AMP controllers: select block-based flow control and
 * read version, command table, AMP info, data block size, flow control
 * mode and location data.
 */
static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
234
/* Stage-2 init for AMP controllers. */
static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}
246
/* Stage-1 init request: optional reset, then per-device-type basic setup
 * (BR/EDR primary vs. AMP).
 */
static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}
271
/* Stage-2 BR/EDR setup: read buffer sizes, identity and IAC information,
 * clear event filters and set the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
303
/* Stage-2 LE setup: read LE buffer size, local LE features and supported
 * states; LE-only controllers get HCI_LE_ENABLED implicitly.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
321
/* Build and queue the HCI Set Event Mask command. The mask starts from a
 * BR/EDR-friendly default and is extended bit by bit based on the LMP
 * features the controller reports; LE-only controllers start from an
 * all-zero mask instead and only enable what they support.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
417
/* Stage-2 init request: dispatch to AMP setup, or run BR/EDR and LE
 * setup, read the supported command table, configure SSP/EIR, inquiry
 * mode, TX power, extended features page 1 and link-level auth.
 */
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}
501
/* Queue Write Default Link Policy with the bits for every link-policy
 * capability (role switch, hold, sniff, park) the controller supports.
 */
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
520
/* Queue Write LE Host Supported to mirror the HCI_LE_ENABLED flag, but
 * only when the host-side value actually differs from the controller's.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
541
/* Build and conditionally queue the Set Event Mask Page 2 command for
 * Connectionless Slave Broadcast and Authenticated Payload Timeout events.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (changed)
		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
			    sizeof(events), events);
}
586
/* Stage-3 init request: program the event mask, read stored link keys,
 * set the link policy and page-scan parameters, configure the full LE
 * event mask and LE controller state (white list, resolving list, data
 * length, advertising sets), and read extended feature pages beyond 1.
 */
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	if (hdev->commands[18] & 0x04)
		hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports LL Privacy feature, enable
		 * the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
			events[1] |= 0x02;	/* LE Enhanced Connection
						 * Complete
						 */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the correspondig event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports Channel Selection Algorithm #2
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
			events[2] |= 0x08;	/* LE Channel Selection
						 * Algorithm
						 */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		/* If the controller supports the LE Set Default PHY or
		 * LE Set PHY commands, enable the corresponding event.
		 */
		if (hdev->commands[35] & (0x20 | 0x40))
			events[1] |= 0x08;	/* LE PHY Update Complete */

		/* If the controller supports LE Set Extended Scan Parameters
		 * and LE Set Extended Scan Enable commands, enable the
		 * corresponding event.
		 */
		if (use_ext_scan(hdev))
			events[1] |= 0x10;	/* LE Extended Advertising
						 * Report
						 */

		/* If the controller supports the LE Extended Advertising
		 * command, enable the corresponding event.
		 */
		if (ext_adv_capable(hdev))
			events[2] |= 0x02;	/* LE Advertising Set
						 * Terminated
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		/* Read LE Advertising Channel TX Power */
		if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
			/* HCI TS spec forbids mixing of legacy and extended
			 * advertising commands wherein READ_ADV_TX_POWER is
			 * also included. So do not call it if extended adv
			 * is supported otherwise controller will return
			 * COMMAND_DISALLOWED for extended commands.
			 */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->commands[34] & 0x40) {
			/* Read LE Resolving List Size */
			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[34] & 0x20) {
			/* Clear LE Resolving List */
			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		if (ext_adv_capable(hdev)) {
			/* Read LE Number of Supported Advertising Sets */
			hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
				    0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}
794
/* Stage-4 init request: delete stored link keys, set event mask page 2,
 * read codecs/pairing options/MWS config, enable Secure Connections,
 * sync erroneous data reporting with the wideband speech setting, and
 * program default LE data length and PHY parameters.
 */
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Read local pairing options if the HCI command is supported */
	if (hdev->commands[41] & 0x08)
		hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set erroneous data reporting if supported to the wideband speech
	 * setting value
	 */
	if (hdev->commands[18] & 0x08) {
		bool enabled = hci_dev_test_flag(hdev,
						 HCI_WIDEBAND_SPEECH_ENABLED);

		if (enabled !=
		    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
			struct hci_cp_write_def_err_data_reporting cp;

			cp.err_data_reporting = enabled ?
						ERR_DATA_REPORTING_ENABLED :
						ERR_DATA_REPORTING_DISABLED;

			hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
				    sizeof(cp), &cp);
		}
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
		cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	/* Set Default PHY parameters if command is supported */
	if (hdev->commands[35] & 0x20) {
		struct hci_cp_le_set_default_phy cp;

		cp.all_phys = 0x00;
		cp.tx_phys = hdev->le_tx_def_phys;
		cp.rx_phys = hdev->le_rx_def_phys;

		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
	}

	return 0;
}
893
2177bab5
JH
/* Run the full synchronous init sequence for a configured controller.
 * Stages 1-2 apply to all device types; stages 3-4 and the debugfs/SMP
 * registration only apply to HCI_PRIMARY controllers.
 *
 * Returns 0 on success or a negative errno from any failed stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
950
/* Build the minimal HCI request used for unconfigured controllers:
 * optional reset, Read Local Version, and (if the driver can program
 * a BD_ADDR) Read BD Address. Always returns 0; the request itself is
 * executed later by __hci_req_sync().
 */
static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}
970
/* Minimal init for a controller in unconfigured state: run hci_init0_req
 * synchronously (skipped entirely for raw-only devices) and create the
 * basic debugfs entries while still in setup phase. Returns 0 on success
 * or a negative errno from __hci_req_sync().
 */
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	/* Raw devices get no HCI initialization at all */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}
987
a1d01db1 988static int hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
989{
990 __u8 scan = opt;
991
42c6b129 992 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
993
994 /* Inquiry and Page scans */
42c6b129 995 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
a1d01db1 996 return 0;
1da177e4
LT
997}
998
a1d01db1 999static int hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1000{
1001 __u8 auth = opt;
1002
42c6b129 1003 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1004
1005 /* Authentication */
42c6b129 1006 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
a1d01db1 1007 return 0;
1da177e4
LT
1008}
1009
a1d01db1 1010static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1011{
1012 __u8 encrypt = opt;
1013
42c6b129 1014 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1015
e4e8e37c 1016 /* Encryption */
42c6b129 1017 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
a1d01db1 1018 return 0;
1da177e4
LT
1019}
1020
a1d01db1 1021static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1022{
1023 __le16 policy = cpu_to_le16(opt);
1024
42c6b129 1025 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1026
1027 /* Default link policy */
42c6b129 1028 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
a1d01db1 1029 return 0;
e4e8e37c
MH
1030}
1031
/* Get HCI device by index.
 * Device is held on return: the caller owns a reference and must drop
 * it with hci_dev_put(). Returns NULL for a negative index or when no
 * device with that id exists. The list walk is protected by
 * hci_dev_list_lock taken for read.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			/* Take the reference while still under the lock */
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
1da177e4
LT
1053
1054/* ---- Inquiry support ---- */
ff9ef578 1055
30dc78e1
JH
1056bool hci_discovery_active(struct hci_dev *hdev)
1057{
1058 struct discovery_state *discov = &hdev->discovery;
1059
6fbe195d 1060 switch (discov->state) {
343f935b 1061 case DISCOVERY_FINDING:
6fbe195d 1062 case DISCOVERY_RESOLVING:
30dc78e1
JH
1063 return true;
1064
6fbe195d
AG
1065 default:
1066 return false;
1067 }
30dc78e1
JH
1068}
1069
ff9ef578
JH
/* Transition the discovery state machine and emit the corresponding mgmt
 * "discovering" events. A transition to the same state is a no-op.
 * On STOPPED the background scan is re-evaluated; the mgmt stop event is
 * suppressed when coming from STARTING since no start event was sent yet.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		/* Only report "discovery stopped" if a start was reported */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
1099
/* Free every entry in the inquiry cache and reset the unknown/resolve
 * sub-lists. The "all" list owns the entries; the other two lists only
 * reference them, so re-initializing the heads is sufficient after the
 * kfree() pass. Caller is expected to hold hdev->lock.
 */
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
1113
a8c5fb1a
GP
/* Look up an inquiry cache entry by Bluetooth address across all cached
 * devices. Returns the matching entry or NULL. The returned pointer is
 * only valid while the cache is not flushed.
 */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
1129
/* Look up an entry by address on the "unknown" sub-list, i.e. devices
 * whose remote name has not been resolved yet. Returns the matching
 * entry or NULL.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
1145
/* Look up an entry on the "resolve" sub-list (name resolution pending).
 * Passing BDADDR_ANY matches the first entry in the given name @state;
 * otherwise the entry with the matching address is returned. Returns
 * NULL when nothing matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		/* Wildcard lookup: any device in the requested name state */
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
1164
/* Re-insert @ie into the resolve list keeping it sorted by signal
 * strength (strongest RSSI, i.e. smallest absolute value, first), so
 * name resolution is attempted on the best candidates first. Entries
 * already in NAME_PENDING state keep their position at the front.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	/* Insert after the last entry that outranks @ie */
	list_add(&ie->list, pos);
}
1183
af58925c
MH
/* Insert or refresh an inquiry cache entry for a discovered device and
 * compute the MGMT device-found flags for it.
 *
 * @data:       inquiry result data for the device
 * @name_known: whether the remote name is already known
 *
 * Returns a bitmask that may contain MGMT_DEV_FOUND_LEGACY_PAIRING
 * (no SSP support) and/or MGMT_DEV_FOUND_CONFIRM_NAME (name still needs
 * resolving, or the entry could not be allocated). Caller is expected
 * to hold hdev->lock.
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A device showing up in inquiry invalidates stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* RSSI changed: re-sort the pending name-resolve queue */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known for an entry previously queued as
	 * unknown: promote it and remove it from that sub-list.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1245
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info (the HCI ioctl wire format). Returns the number
 * of entries copied. @buf must hold at least @num entries; caller is
 * expected to hold hdev->lock (this function must not sleep).
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
1273
/* Request builder: queue an HCI Inquiry command using the parameters in
 * the struct hci_inquiry_req that @opt points to. Does nothing if an
 * inquiry is already in progress. Always returns 0.
 */
static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
1293
1294int hci_inquiry(void __user *arg)
1295{
1296 __u8 __user *ptr = arg;
1297 struct hci_inquiry_req ir;
1298 struct hci_dev *hdev;
1299 int err = 0, do_inquiry = 0, max_rsp;
1300 long timeo;
1301 __u8 *buf;
1302
1303 if (copy_from_user(&ir, ptr, sizeof(ir)))
1304 return -EFAULT;
1305
5a08ecce
AE
1306 hdev = hci_dev_get(ir.dev_id);
1307 if (!hdev)
1da177e4
LT
1308 return -ENODEV;
1309
d7a5a11d 1310 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1311 err = -EBUSY;
1312 goto done;
1313 }
1314
d7a5a11d 1315 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1316 err = -EOPNOTSUPP;
1317 goto done;
1318 }
1319
ca8bee5d 1320 if (hdev->dev_type != HCI_PRIMARY) {
5b69bef5
MH
1321 err = -EOPNOTSUPP;
1322 goto done;
1323 }
1324
d7a5a11d 1325 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
1326 err = -EOPNOTSUPP;
1327 goto done;
1328 }
1329
09fd0de5 1330 hci_dev_lock(hdev);
8e87d142 1331 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1332 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1333 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1334 do_inquiry = 1;
1335 }
09fd0de5 1336 hci_dev_unlock(hdev);
1da177e4 1337
04837f64 1338 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1339
1340 if (do_inquiry) {
01178cd4 1341 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
4ebeee2d 1342 timeo, NULL);
70f23020
AE
1343 if (err < 0)
1344 goto done;
3e13fa1e
AG
1345
1346 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1347 * cleared). If it is interrupted by a signal, return -EINTR.
1348 */
74316201 1349 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
3e13fa1e
AG
1350 TASK_INTERRUPTIBLE))
1351 return -EINTR;
70f23020 1352 }
1da177e4 1353
8fc9ced3
GP
1354 /* for unlimited number of responses we will use buffer with
1355 * 255 entries
1356 */
1da177e4
LT
1357 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1358
1359 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1360 * copy it to the user space.
1361 */
6da2ec56 1362 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
70f23020 1363 if (!buf) {
1da177e4
LT
1364 err = -ENOMEM;
1365 goto done;
1366 }
1367
09fd0de5 1368 hci_dev_lock(hdev);
1da177e4 1369 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1370 hci_dev_unlock(hdev);
1da177e4
LT
1371
1372 BT_DBG("num_rsp %d", ir.num_rsp);
1373
1374 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1375 ptr += sizeof(ir);
1376 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1377 ir.num_rsp))
1da177e4 1378 err = -EFAULT;
8e87d142 1379 } else
1da177e4
LT
1380 err = -EFAULT;
1381
1382 kfree(buf);
1383
1384done:
1385 hci_dev_put(hdev);
1386 return err;
1387}
1388
7a0e5b15
MK
/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *                                     (BD_ADDR) for a HCI device from
 *                                     a firmware node property.
 * @hdev: The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables, but were not updated by the firmware. For
 * example, the DTS could define 'local-bd-address', with zero BD addresses.
 *
 * On success the address is stored in hdev->public_addr; on read failure
 * or an all-zero value, hdev->public_addr is left untouched.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
	bdaddr_t ba;
	int ret;

	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
					    (u8 *)&ba, sizeof(ba));
	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
		return;

	bacpy(&hdev->public_addr, &ba);
}
1414
/* Power on a controller: open the transport, run the driver setup and the
 * HCI init stages, and bring the device fully UP, or unwind everything on
 * failure. Serialized against other sync requests via hci_req_sync_lock().
 * Returns 0 on success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport (driver callback) */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
		bool invalid_bdaddr;

		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set the quirk to mark the
		 * BD_ADDR invalid before creating the HCI device or in
		 * its setup callback.
		 */
		invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
					  &hdev->quirks);

		if (ret)
			goto setup_failed;

		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
				hci_dev_get_bd_addr_from_property(hdev);

			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
			    hdev->set_bdaddr) {
				ret = hdev->set_bdaddr(hdev,
						       &hdev->public_addr);

				/* If setting of the BD_ADDR from the device
				 * property succeeds, then treat the address
				 * as valid even if the invalid BD_ADDR
				 * quirk indicates otherwise.
				 */
				if (!ret)
					invalid_bdaddr = false;
			}
		}

setup_failed:
		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * For the invalid BD_ADDR quirk it is possible that
		 * it becomes a valid address if the bootloader does
		 * provide it (see above).
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    invalid_bdaddr)
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		/* Full HCI init only for configured, kernel-managed devices;
		 * user channel gets raw access without init.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	msft_do_open(hdev);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}
1619
cbed0ca1
JH
1620/* ---- HCI ioctl helpers ---- */
1621
/* Handle the HCIDEVUP ioctl: validate device state, flush pending power
 * work, and delegate to hci_dev_do_open(). Returns 0 on success or a
 * negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1676
d7347f3c
JH
/* Drop all pending LE connection attempts and clear their auto-connect
 * actions. For each parameter entry holding a connection, release both
 * the connection-level and object-level references before detaching it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}
1693
/* Power down a controller: cancel pending work, flush queues and caches,
 * notify mgmt/monitor listeners, optionally reset the hardware, and close
 * the transport. The teardown order below is deliberate (workqueues must
 * drain before the *_flush() calls to avoid lockdep warnings). Returns 0.
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		struct adv_info *adv_instance;

		/* Stop all RPA rotation timers, including per-instance ones */
		cancel_delayed_work_sync(&hdev->rpa_expired);

		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	msft_do_close(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* Unblock a suspend sequence waiting on this power-down */
	if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
		wake_up(&hdev->suspend_wait_q);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1824
/* Handle the HCIDEVDOWN ioctl: refuse while a user channel owns the
 * device, cancel a pending auto power-off, and delegate to
 * hci_dev_do_close(). Returns 0 on success or a negative errno.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1848
/* Perform a controller reset: drop all queued traffic, flush the inquiry
 * cache and connection table, zero the flow-control counters and issue a
 * synchronous HCI Reset. Serialized via hci_req_sync_lock(). Returns 0
 * on success or a negative errno from the reset request.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}
1882
5c912495
MH
/* Handle the HCIDEVRESET ioctl: validate that the device is up, not
 * claimed by a user channel and not unconfigured, then delegate to
 * hci_dev_do_reset(). Returns 0 on success or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1913
1da177e4
LT
/* Handle the HCIDEVRESTAT ioctl: zero the device's traffic statistics.
 * Refused for user-channel and unconfigured devices. Returns 0 on
 * success or a negative errno.
 */
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}
1939
123abc08
JH
/* Mirror a Write Scan Enable value into the CONNECTABLE/DISCOVERABLE
 * flags and, when the management interface is active and something
 * actually changed, re-enable BR/EDR, refresh advertising data and
 * broadcast the new settings to mgmt clients.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		/* Limited discoverable implies discoverable; drop both */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}
1975
1da177e4
LT
/* Handle the per-device HCI ioctls (HCISETAUTH, HCISETSCAN, ...)
 * coming from user space.  Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* These ioctls are unavailable while the device is bound to a
	 * user channel, is unconfigured, is not a primary controller
	 * or has BR/EDR disabled.
	 */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		/* Skip the mgmt notification when nothing changes */
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		/* dev_opt carries two 16-bit values: word 1 is the MTU
		 * and word 0 the packet count.
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2081
/* Handle the HCIGETDEVLIST ioctl: copy id and flags of up to dev_num
 * registered controllers back to user space.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Reject zero and counts that would make the allocation huge */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2131
/* Handle the HCIGETDEVINFO ioctl: fill a hci_dev_info for one device. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	/* NOTE(review): unbounded strcpy; assumes hdev->name is always
	 * NUL-terminated and fits in di.name — confirm sizes.
	 */
	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings in the
	 * ACL fields and no SCO support.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2184
2185/* ---- Interface to HCI drivers ---- */
2186
611b30f7
MH
2187static int hci_rfkill_set_block(void *data, bool blocked)
2188{
2189 struct hci_dev *hdev = data;
2190
2191 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2192
d7a5a11d 2193 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
0736cfa8
MH
2194 return -EBUSY;
2195
5e130367 2196 if (blocked) {
a1536da2 2197 hci_dev_set_flag(hdev, HCI_RFKILLED);
d7a5a11d
MH
2198 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2199 !hci_dev_test_flag(hdev, HCI_CONFIG))
bf543036 2200 hci_dev_do_close(hdev);
5e130367 2201 } else {
a358dc11 2202 hci_dev_clear_flag(hdev, HCI_RFKILLED);
1025c04c 2203 }
611b30f7
MH
2204
2205 return 0;
2206}
2207
/* rfkill operations registered for every Bluetooth controller */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2211
ab81cbf9
JH
/* Deferred power-on work.  Brings the controller up and then re-checks
 * the error conditions (rfkill, unconfigured, missing address) that are
 * deliberately ignored during setup, powering back down when any still
 * applies.  Also emits the appropriate mgmt Index Added events at the
 * end of the setup/config phases.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	/* Powering on via mgmt while the transport is already running
	 * (auto-off pending): run the power-on request synchronously
	 * and report the result to mgmt.
	 */
	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		/* Schedule the automatic power-off unless mgmt keeps
		 * the device powered.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2283
/* Deferred power-off work: simply take the controller down. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2293
c7741d16
MH
/* Work item scheduled on an HCI Hardware Error event: let the driver
 * handle the error if it provides a hook, otherwise just log it, then
 * cycle the device (close and re-open) to recover.
 */
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	/* Only attempt the re-open when the close succeeded */
	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}
2310
35f7498a 2311void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2312{
4821002c 2313 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2314
4821002c
JH
2315 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2316 list_del(&uuid->list);
2aeb9a1a
JH
2317 kfree(uuid);
2318 }
2aeb9a1a
JH
2319}
2320
/* Drop all stored BR/EDR link keys.
 *
 * Entries are unlinked with list_del_rcu() and freed via kfree_rcu(),
 * keeping concurrent RCU readers (hci_find_link_key) safe; the
 * non-_safe iterator works here because freeing is deferred.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
2330
/* Drop all stored LE long term keys.
 *
 * Uses list_del_rcu()/kfree_rcu() so RCU readers stay safe; the
 * non-_safe iterator is fine because the free is deferred.
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2340
970c4e46
JH
/* Drop all stored identity resolving keys (RCU-deferred free). */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2350
600a8749
AM
/* Drop all entries from the blocked-key list (RCU-deferred free). */
void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b;

	list_for_each_entry(b, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}
2360
2361bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2362{
2363 bool blocked = false;
2364 struct blocked_key *b;
2365
2366 rcu_read_lock();
0c2ac7d4 2367 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
600a8749
AM
2368 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2369 blocked = true;
2370 break;
2371 }
2372 }
2373
2374 rcu_read_unlock();
2375 return blocked;
2376}
2377
55ed8ca1
JH
/* Look up the stored BR/EDR link key for @bdaddr.
 *
 * Returns NULL when no key exists or when the key is on the blocked
 * key list.
 *
 * NOTE(review): the matched entry is dereferenced after
 * rcu_read_unlock(); presumably safe because key removal is
 * serialized with callers via hdev->lock — confirm.
 */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2403
745c0ce3 2404static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2405 u8 key_type, u8 old_key_type)
d25e28ab
JH
2406{
2407 /* Legacy key */
2408 if (key_type < 0x03)
745c0ce3 2409 return true;
d25e28ab
JH
2410
2411 /* Debug keys are insecure so don't store them persistently */
2412 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2413 return false;
d25e28ab
JH
2414
2415 /* Changed combination key and there's no previous one */
2416 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2417 return false;
d25e28ab
JH
2418
2419 /* Security mode 3 case */
2420 if (!conn)
745c0ce3 2421 return true;
d25e28ab 2422
e3befab9
JH
2423 /* BR/EDR key derived using SC from an LE link */
2424 if (conn->type == LE_LINK)
2425 return true;
2426
d25e28ab
JH
2427 /* Neither local nor remote side had no-bonding as requirement */
2428 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2429 return true;
d25e28ab
JH
2430
2431 /* Local side had dedicated bonding as requirement */
2432 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2433 return true;
d25e28ab
JH
2434
2435 /* Remote side had dedicated bonding as requirement */
2436 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2437 return true;
d25e28ab
JH
2438
2439 /* If none of the above criteria match, then don't store the key
2440 * persistently */
745c0ce3 2441 return false;
d25e28ab
JH
2442}
2443
e804d25d 2444static u8 ltk_role(u8 type)
98a0b845 2445{
e804d25d
JH
2446 if (type == SMP_LTK)
2447 return HCI_ROLE_MASTER;
98a0b845 2448
e804d25d 2449 return HCI_ROLE_SLAVE;
98a0b845
JH
2450}
2451
f3a73d97
JH
/* Look up an LTK for @bdaddr/@addr_type usable in the given @role.
 *
 * Secure Connections LTKs are symmetric and match either role; legacy
 * LTKs must match the role they were distributed for.  Returns NULL
 * when no key is found or the key is on the blocked key list.
 *
 * NOTE(review): k is used after rcu_read_unlock(); presumably safe
 * because key removal is serialized with callers via hdev->lock —
 * confirm.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
75d262c2 2480
970c4e46
JH
/* Resolve a Resolvable Private Address to a stored IRK.
 *
 * First tries the cached RPA of each IRK; if none matches, performs
 * the cryptographic match and caches the RPA on success.  Blocked
 * IRKs are reported and treated as not found.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	/* Fast path: previously resolved RPA */
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	/* Slow path: try to resolve the RPA against every IRK */
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
2514
/* Look up the IRK stored for an identity address.
 *
 * Returns NULL for invalid identity addresses (random addresses must
 * be static random, top two bits set), when no IRK exists or when the
 * IRK is on the blocked key list.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:

	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
2547
/* Store (or update) a BR/EDR link key.
 *
 * When @persistent is non-NULL it is set to whether the key should be
 * stored permanently (see hci_persistent_key()).  Returns the stored
 * entry or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	/* Reuse an existing entry for this address, else allocate */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2594
ca9142b8 2595struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 2596 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 2597 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 2598{
c9839a11 2599 struct smp_ltk *key, *old_key;
e804d25d 2600 u8 role = ltk_role(type);
75d262c2 2601
f3a73d97 2602 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
c9839a11 2603 if (old_key)
75d262c2 2604 key = old_key;
c9839a11 2605 else {
0a14ab41 2606 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2607 if (!key)
ca9142b8 2608 return NULL;
970d0f1b 2609 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2610 }
2611
75d262c2 2612 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2613 key->bdaddr_type = addr_type;
2614 memcpy(key->val, tk, sizeof(key->val));
2615 key->authenticated = authenticated;
2616 key->ediv = ediv;
fe39c7b2 2617 key->rand = rand;
c9839a11
VCG
2618 key->enc_size = enc_size;
2619 key->type = type;
75d262c2 2620
ca9142b8 2621 return key;
75d262c2
VCG
2622}
2623
ca9142b8
JH
2624struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2625 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
2626{
2627 struct smp_irk *irk;
2628
2629 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2630 if (!irk) {
2631 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2632 if (!irk)
ca9142b8 2633 return NULL;
970c4e46
JH
2634
2635 bacpy(&irk->bdaddr, bdaddr);
2636 irk->addr_type = addr_type;
2637
adae20cb 2638 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
2639 }
2640
2641 memcpy(irk->val, val, 16);
2642 bacpy(&irk->rpa, rpa);
2643
ca9142b8 2644 return irk;
970c4e46
JH
2645}
2646
55ed8ca1
JH
2647int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2648{
2649 struct link_key *key;
2650
2651 key = hci_find_link_key(hdev, bdaddr);
2652 if (!key)
2653 return -ENOENT;
2654
6ed93dc6 2655 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 2656
0378b597
JH
2657 list_del_rcu(&key->list);
2658 kfree_rcu(key, rcu);
55ed8ca1
JH
2659
2660 return 0;
2661}
2662
/* Remove all LTKs matching @bdaddr/@bdaddr_type.
 *
 * Returns 0 when at least one key was removed, -ENOENT otherwise.
 * Deleting while iterating is safe here because freeing is deferred
 * via kfree_rcu().
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
2681
a7ec7338
JH
/* Remove all IRKs matching @bdaddr/@addr_type.  Deleting while
 * iterating is safe because freeing is deferred via kfree_rcu().
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2696
55e76b38
JH
/* Return true when a pairing key (BR/EDR link key or LE LTK) exists
 * for @bdaddr.  LE addresses are first resolved through the IRK list
 * so that an RPA maps to its identity address.
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* Map an RPA to its identity address when an IRK is known */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
2732
/* HCI command timer function: fires when the controller did not
 * respond to a command in time.  Logs the timeout, gives the driver a
 * chance to reset the hardware and unblocks the command queue.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	/* Include the opcode when the timed-out command is still around */
	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	/* Optional driver hook for recovering the controller */
	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	/* Allow the next queued command to be sent */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2754
2763eda6 2755struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 2756 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
2757{
2758 struct oob_data *data;
2759
6928a924
JH
2760 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2761 if (bacmp(bdaddr, &data->bdaddr) != 0)
2762 continue;
2763 if (data->bdaddr_type != bdaddr_type)
2764 continue;
2765 return data;
2766 }
2763eda6
SJ
2767
2768 return NULL;
2769}
2770
6928a924
JH
2771int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2772 u8 bdaddr_type)
2763eda6
SJ
2773{
2774 struct oob_data *data;
2775
6928a924 2776 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6
SJ
2777 if (!data)
2778 return -ENOENT;
2779
6928a924 2780 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2763eda6
SJ
2781
2782 list_del(&data->list);
2783 kfree(data);
2784
2785 return 0;
2786}
2787
35f7498a 2788void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2789{
2790 struct oob_data *data, *n;
2791
2792 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2793 list_del(&data->list);
2794 kfree(data);
2795 }
2763eda6
SJ
2796}
2797
/* Store (or update) remote out-of-band pairing data.
 *
 * The "present" field is a bitmask of which values are valid:
 * 0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both, 0x00 = none.
 * Missing values are zeroed out.  Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	/* Reuse an existing entry for this address, else allocate */
	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2843
d2609b34
FG
2844/* This function requires the caller holds hdev->lock */
2845struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2846{
2847 struct adv_info *adv_instance;
2848
2849 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2850 if (adv_instance->instance == instance)
2851 return adv_instance;
2852 }
2853
2854 return NULL;
2855}
2856
2857/* This function requires the caller holds hdev->lock */
74b93e9f
PK
2858struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2859{
d2609b34
FG
2860 struct adv_info *cur_instance;
2861
2862 cur_instance = hci_find_adv_instance(hdev, instance);
2863 if (!cur_instance)
2864 return NULL;
2865
2866 if (cur_instance == list_last_entry(&hdev->adv_instances,
2867 struct adv_info, list))
2868 return list_first_entry(&hdev->adv_instances,
2869 struct adv_info, list);
2870 else
2871 return list_next_entry(cur_instance, list);
2872}
2873
/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %dMR", hdev->name, instance);

	/* When removing the currently advertised instance, stop its
	 * expiry timer and reset the current-instance tracking.
	 */
	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	/* Make sure the RPA-expiry work is not running before freeing */
	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}
2902
a73c046a
JK
2903void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2904{
2905 struct adv_info *adv_instance, *n;
2906
2907 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2908 adv_instance->rpa_expired = rpa_expired;
2909}
2910
d2609b34
FG
/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	/* Stop the instance-expiry timer before tearing everything down */
	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}
2930
a73c046a
JK
/* Delayed work: mark an advertising instance's RPA as expired so a
 * fresh address gets generated the next time it is used.
 */
static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}
2940
d2609b34
FG
/* This function requires the caller holds hdev->lock.
 *
 * Add or update advertising instance @instance with the given
 * advertising/scan-response data and timing.  Returns 0 on success,
 * -EOVERFLOW when no free slot exists or the instance number is
 * invalid, -ENOMEM on allocation failure.
 */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration)
{
	struct adv_info *adv_instance;

	/* Update an existing instance in place, otherwise allocate */
	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	/* A zero duration selects the default rotation duration */
	if (duration == 0)
		adv_instance->duration = hdev->def_multi_adv_rotation_duration;
	else
		adv_instance->duration = duration;

	adv_instance->tx_power = HCI_TX_POWER_INVALID;

	INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
			  adv_instance_rpa_expired);

	BT_DBG("%s for %dMR", hdev->name, instance);

	return 0;
}
2998
dcc36c16 2999struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 3000 bdaddr_t *bdaddr, u8 type)
b2a66aad 3001{
8035ded4 3002 struct bdaddr_list *b;
b2a66aad 3003
dcc36c16 3004 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 3005 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3006 return b;
b9ee0a78 3007 }
b2a66aad
AJ
3008
3009 return NULL;
3010}
3011
b950aa88
AN
3012struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3013 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3014 u8 type)
3015{
3016 struct bdaddr_list_with_irk *b;
3017
3018 list_for_each_entry(b, bdaddr_list, list) {
3019 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3020 return b;
3021 }
3022
3023 return NULL;
3024}
3025
8baaa403
APS
3026struct bdaddr_list_with_flags *
3027hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3028 bdaddr_t *bdaddr, u8 type)
3029{
3030 struct bdaddr_list_with_flags *b;
3031
3032 list_for_each_entry(b, bdaddr_list, list) {
3033 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3034 return b;
3035 }
3036
3037 return NULL;
3038}
3039
dcc36c16 3040void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad 3041{
7eb7404f 3042 struct bdaddr_list *b, *n;
b2a66aad 3043
7eb7404f
GT
3044 list_for_each_entry_safe(b, n, bdaddr_list, list) {
3045 list_del(&b->list);
b2a66aad
AJ
3046 kfree(b);
3047 }
b2a66aad
AJ
3048}
3049
dcc36c16 3050int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3051{
3052 struct bdaddr_list *entry;
b2a66aad 3053
b9ee0a78 3054 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3055 return -EBADF;
3056
dcc36c16 3057 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 3058 return -EEXIST;
b2a66aad 3059
27f70f3e 3060 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
3061 if (!entry)
3062 return -ENOMEM;
b2a66aad
AJ
3063
3064 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3065 entry->bdaddr_type = type;
b2a66aad 3066
dcc36c16 3067 list_add(&entry->list, list);
b2a66aad 3068
2a8357f2 3069 return 0;
b2a66aad
AJ
3070}
3071
b950aa88
AN
3072int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3073 u8 type, u8 *peer_irk, u8 *local_irk)
3074{
3075 struct bdaddr_list_with_irk *entry;
3076
3077 if (!bacmp(bdaddr, BDADDR_ANY))
3078 return -EBADF;
3079
3080 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3081 return -EEXIST;
3082
3083 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3084 if (!entry)
3085 return -ENOMEM;
3086
3087 bacpy(&entry->bdaddr, bdaddr);
3088 entry->bdaddr_type = type;
3089
3090 if (peer_irk)
3091 memcpy(entry->peer_irk, peer_irk, 16);
3092
3093 if (local_irk)
3094 memcpy(entry->local_irk, local_irk, 16);
3095
3096 list_add(&entry->list, list);
3097
3098 return 0;
3099}
3100
8baaa403
APS
3101int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3102 u8 type, u32 flags)
3103{
3104 struct bdaddr_list_with_flags *entry;
3105
3106 if (!bacmp(bdaddr, BDADDR_ANY))
3107 return -EBADF;
3108
3109 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3110 return -EEXIST;
3111
3112 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3113 if (!entry)
3114 return -ENOMEM;
3115
3116 bacpy(&entry->bdaddr, bdaddr);
3117 entry->bdaddr_type = type;
3118 entry->current_flags = flags;
3119
3120 list_add(&entry->list, list);
3121
3122 return 0;
3123}
3124
dcc36c16 3125int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3126{
3127 struct bdaddr_list *entry;
b2a66aad 3128
35f7498a 3129 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 3130 hci_bdaddr_list_clear(list);
35f7498a
JH
3131 return 0;
3132 }
b2a66aad 3133
dcc36c16 3134 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
3135 if (!entry)
3136 return -ENOENT;
3137
3138 list_del(&entry->list);
3139 kfree(entry);
3140
3141 return 0;
3142}
3143
b950aa88
AN
3144int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3145 u8 type)
3146{
3147 struct bdaddr_list_with_irk *entry;
3148
3149 if (!bacmp(bdaddr, BDADDR_ANY)) {
3150 hci_bdaddr_list_clear(list);
3151 return 0;
3152 }
3153
3154 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3155 if (!entry)
3156 return -ENOENT;
3157
3158 list_del(&entry->list);
3159 kfree(entry);
3160
3161 return 0;
3162}
3163
8baaa403
APS
3164int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3165 u8 type)
3166{
3167 struct bdaddr_list_with_flags *entry;
3168
3169 if (!bacmp(bdaddr, BDADDR_ANY)) {
3170 hci_bdaddr_list_clear(list);
3171 return 0;
3172 }
3173
3174 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3175 if (!entry)
3176 return -ENOENT;
3177
3178 list_del(&entry->list);
3179 kfree(entry);
3180
3181 return 0;
3182}
3183
15819a70
AG
3184/* This function requires the caller holds hdev->lock */
3185struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3186 bdaddr_t *addr, u8 addr_type)
3187{
3188 struct hci_conn_params *params;
3189
3190 list_for_each_entry(params, &hdev->le_conn_params, list) {
3191 if (bacmp(&params->addr, addr) == 0 &&
3192 params->addr_type == addr_type) {
3193 return params;
3194 }
3195 }
3196
3197 return NULL;
3198}
3199
4b10966f 3200/* This function requires the caller holds hdev->lock */
501f8827
JH
3201struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3202 bdaddr_t *addr, u8 addr_type)
a9b0a04c 3203{
912b42ef 3204 struct hci_conn_params *param;
a9b0a04c 3205
501f8827 3206 list_for_each_entry(param, list, action) {
912b42ef
JH
3207 if (bacmp(&param->addr, addr) == 0 &&
3208 param->addr_type == addr_type)
3209 return param;
4b10966f
MH
3210 }
3211
3212 return NULL;
a9b0a04c
AG
3213}
3214
15819a70 3215/* This function requires the caller holds hdev->lock */
51d167c0
MH
3216struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3217 bdaddr_t *addr, u8 addr_type)
15819a70
AG
3218{
3219 struct hci_conn_params *params;
3220
3221 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 3222 if (params)
51d167c0 3223 return params;
15819a70
AG
3224
3225 params = kzalloc(sizeof(*params), GFP_KERNEL);
3226 if (!params) {
2064ee33 3227 bt_dev_err(hdev, "out of memory");
51d167c0 3228 return NULL;
15819a70
AG
3229 }
3230
3231 bacpy(&params->addr, addr);
3232 params->addr_type = addr_type;
cef952ce
AG
3233
3234 list_add(&params->list, &hdev->le_conn_params);
93450c75 3235 INIT_LIST_HEAD(&params->action);
cef952ce 3236
bf5b3c8b
MH
3237 params->conn_min_interval = hdev->le_conn_min_interval;
3238 params->conn_max_interval = hdev->le_conn_max_interval;
3239 params->conn_latency = hdev->le_conn_latency;
3240 params->supervision_timeout = hdev->le_supv_timeout;
3241 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3242
3243 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3244
51d167c0 3245 return params;
bf5b3c8b
MH
3246}
3247
f6c63249 3248static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 3249{
f8aaf9b6 3250 if (params->conn) {
f161dd41 3251 hci_conn_drop(params->conn);
f8aaf9b6
JH
3252 hci_conn_put(params->conn);
3253 }
f161dd41 3254
95305baa 3255 list_del(&params->action);
15819a70
AG
3256 list_del(&params->list);
3257 kfree(params);
f6c63249
JH
3258}
3259
3260/* This function requires the caller holds hdev->lock */
3261void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3262{
3263 struct hci_conn_params *params;
3264
3265 params = hci_conn_params_lookup(hdev, addr, addr_type);
3266 if (!params)
3267 return;
3268
3269 hci_conn_params_free(params);
15819a70 3270
95305baa
JH
3271 hci_update_background_scan(hdev);
3272
15819a70
AG
3273 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3274}
3275
3276/* This function requires the caller holds hdev->lock */
55af49a8 3277void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
3278{
3279 struct hci_conn_params *params, *tmp;
3280
3281 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
3282 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3283 continue;
f75113a2
JP
3284
3285 /* If trying to estabilish one time connection to disabled
3286 * device, leave the params, but mark them as just once.
3287 */
3288 if (params->explicit_connect) {
3289 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3290 continue;
3291 }
3292
15819a70
AG
3293 list_del(&params->list);
3294 kfree(params);
3295 }
3296
55af49a8 3297 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
3298}
3299
3300/* This function requires the caller holds hdev->lock */
030e7f81 3301static void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 3302{
15819a70 3303 struct hci_conn_params *params, *tmp;
77a77a30 3304
f6c63249
JH
3305 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3306 hci_conn_params_free(params);
77a77a30 3307
15819a70 3308 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
3309}
3310
a1f4c318
JH
3311/* Copy the Identity Address of the controller.
3312 *
3313 * If the controller has a public BD_ADDR, then by default use that one.
3314 * If this is a LE only controller without a public address, default to
3315 * the static random address.
3316 *
3317 * For debugging purposes it is possible to force controllers with a
3318 * public address to use the static random address instead.
50b5b952
MH
3319 *
3320 * In case BR/EDR has been disabled on a dual-mode controller and
3321 * userspace has configured a static address, then that address
3322 * becomes the identity address instead of the public BR/EDR address.
a1f4c318
JH
3323 */
3324void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3325 u8 *bdaddr_type)
3326{
b7cb93e5 3327 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
50b5b952 3328 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
d7a5a11d 3329 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
50b5b952 3330 bacmp(&hdev->static_addr, BDADDR_ANY))) {
a1f4c318
JH
3331 bacpy(bdaddr, &hdev->static_addr);
3332 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3333 } else {
3334 bacpy(bdaddr, &hdev->bdaddr);
3335 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3336 }
3337}
3338
9952d90e
APS
3339static int hci_suspend_wait_event(struct hci_dev *hdev)
3340{
3341#define WAKE_COND \
3342 (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
3343 __SUSPEND_NUM_TASKS)
3344
3345 int i;
3346 int ret = wait_event_timeout(hdev->suspend_wait_q,
3347 WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3348
3349 if (ret == 0) {
a9ec8423 3350 bt_dev_err(hdev, "Timed out waiting for suspend events");
9952d90e
APS
3351 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3352 if (test_bit(i, hdev->suspend_tasks))
a9ec8423 3353 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
9952d90e
APS
3354 clear_bit(i, hdev->suspend_tasks);
3355 }
3356
3357 ret = -ETIMEDOUT;
3358 } else {
3359 ret = 0;
3360 }
3361
3362 return ret;
3363}
3364
3365static void hci_prepare_suspend(struct work_struct *work)
3366{
3367 struct hci_dev *hdev =
3368 container_of(work, struct hci_dev, suspend_prepare);
3369
3370 hci_dev_lock(hdev);
3371 hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3372 hci_dev_unlock(hdev);
3373}
3374
8731840a
APS
3375static int hci_change_suspend_state(struct hci_dev *hdev,
3376 enum suspended_state next)
3377{
3378 hdev->suspend_state_next = next;
3379 set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3380 queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3381 return hci_suspend_wait_event(hdev);
3382}
3383
9952d90e
APS
3384static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3385 void *data)
3386{
3387 struct hci_dev *hdev =
3388 container_of(nb, struct hci_dev, suspend_notifier);
3389 int ret = 0;
3390
3391 /* If powering down, wait for completion. */
3392 if (mgmt_powering_down(hdev)) {
3393 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3394 ret = hci_suspend_wait_event(hdev);
3395 if (ret)
3396 goto done;
3397 }
3398
3399 /* Suspend notifier should only act on events when powered. */
3400 if (!hdev_is_powered(hdev))
3401 goto done;
3402
3403 if (action == PM_SUSPEND_PREPARE) {
4f40afc6
APS
3404 /* Suspend consists of two actions:
3405 * - First, disconnect everything and make the controller not
3406 * connectable (disabling scanning)
3407 * - Second, program event filter/whitelist and enable scan
3408 */
8731840a 3409 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
4f40afc6 3410
81dafad5
APS
3411 /* Only configure whitelist if disconnect succeeded and wake
3412 * isn't being prevented.
3413 */
3414 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev)))
8731840a 3415 ret = hci_change_suspend_state(hdev,
0d2c9825 3416 BT_SUSPEND_CONFIGURE_WAKE);
9952d90e 3417 } else if (action == PM_POST_SUSPEND) {
8731840a 3418 ret = hci_change_suspend_state(hdev, BT_RUNNING);
9952d90e
APS
3419 }
3420
3421done:
a9ec8423
APS
3422 /* We always allow suspend even if suspend preparation failed and
3423 * attempt to recover in resume.
3424 */
3425 if (ret)
3426 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3427 action, ret);
3428
3429 return NOTIFY_STOP;
9952d90e 3430}
8731840a 3431
/* Alloc HCI device.
 *
 * Allocates and zero-initialises a struct hci_dev, then fills in the
 * controller-independent defaults: packet types, LE timing parameters,
 * list heads, work items, skb queues and wait queues. The returned
 * device is not yet registered; callers pair this with
 * hci_register_dev() and release it with hci_free_dev().
 * Returns NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR capabilities */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE advertising, scanning and connection defaults (values in
	 * controller units, e.g. 0.625 ms or 1.25 ms slots per spec).
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_scan_int_suspend = 0x0400;
	hdev->le_scan_window_suspend = 0x0012;
	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
	hdev->le_scan_int_connect = 0x0060;
	hdev->le_scan_window_connect = 0x0060;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	/* default 1.28 sec page scan */
	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
	hdev->def_page_scan_int = 0x0800;
	hdev->def_page_scan_window = 0x0012;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Key, address and parameter lists */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);

	/* Deferred work: RX/TX/command pumps and power management */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);
	INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);
	init_waitqueue_head(&hdev->suspend_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3542
3543/* Free HCI device */
3544void hci_free_dev(struct hci_dev *hdev)
3545{
9be0dab7
DH
3546 /* will free via device release */
3547 put_device(&hdev->dev);
3548}
3549EXPORT_SYMBOL(hci_free_dev);
3550
1da177e4
LT
/* Register HCI device.
 *
 * Validates the mandatory driver callbacks, allocates an index,
 * creates the work queues, sysfs/debugfs entries and rfkill hook,
 * publishes the device on hci_dev_list and kicks off power-on.
 * Returns the allocated index (>= 0) on success or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* open/close/send are the minimum a transport driver must provide */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	/* rfkill registration is best-effort: the device works without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
	error = register_pm_notifier(&hdev->suspend_notifier);
	if (error)
		/* NOTE(review): this path only tears down the workqueues
		 * and the index, but by now device_add() succeeded, the
		 * device is on hci_dev_list, rfkill may be registered and
		 * a reference was taken via hci_dev_hold() — none of
		 * which is unwound here. Verify against later upstream
		 * fixes before relying on this error path.
		 */
		goto err_wqueue;

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3660
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes the device from the global list,
 * shuts it down, notifies mgmt/sockets, tears down sysfs/debugfs and
 * rfkill, empties all per-device lists, drops the registration
 * reference and finally returns the index to the IDA. The teardown
 * order mirrors registration order and should not be rearranged.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	/* Remember the index; hdev may be freed before ida removal below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_dev_do_close(hdev);

	unregister_pm_notifier(&hdev->suspend_notifier);

	/* Only tell mgmt about the removal if userspace ever saw the
	 * device (i.e. it made it past setup/config).
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Purge every per-device list while holding hdev->lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3731
3732/* Suspend HCI device */
3733int hci_suspend_dev(struct hci_dev *hdev)
3734{
05fcd4c4 3735 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
1da177e4
LT
3736 return 0;
3737}
3738EXPORT_SYMBOL(hci_suspend_dev);
3739
3740/* Resume HCI device */
3741int hci_resume_dev(struct hci_dev *hdev)
3742{
05fcd4c4 3743 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
1da177e4
LT
3744 return 0;
3745}
3746EXPORT_SYMBOL(hci_resume_dev);
3747
75e0569f
MH
3748/* Reset HCI device */
3749int hci_reset_dev(struct hci_dev *hdev)
3750{
1e4b6e91 3751 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
75e0569f
MH
3752 struct sk_buff *skb;
3753
3754 skb = bt_skb_alloc(3, GFP_ATOMIC);
3755 if (!skb)
3756 return -ENOMEM;
3757
d79f34e3 3758 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
59ae1d12 3759 skb_put_data(skb, hw_err, 3);
75e0569f
MH
3760
3761 /* Send Hardware Error to upper stack */
3762 return hci_recv_frame(hdev, skb);
3763}
3764EXPORT_SYMBOL(hci_reset_dev);
3765
76bca880 3766/* Receive frame from HCI drivers */
e1a26170 3767int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3768{
76bca880 3769 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3770 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3771 kfree_skb(skb);
3772 return -ENXIO;
3773 }
3774
d79f34e3
MH
3775 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3776 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
cc974003
MH
3777 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
3778 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
fe806dce
MH
3779 kfree_skb(skb);
3780 return -EINVAL;
3781 }
3782
d82603c6 3783 /* Incoming skb */
76bca880
MH
3784 bt_cb(skb)->incoming = 1;
3785
3786 /* Time stamp */
3787 __net_timestamp(skb);
3788
76bca880 3789 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3790 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3791
76bca880
MH
3792 return 0;
3793}
3794EXPORT_SYMBOL(hci_recv_frame);
3795
e875ff84
MH
3796/* Receive diagnostic message from HCI drivers */
3797int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3798{
581d6fd6 3799 /* Mark as diagnostic packet */
d79f34e3 3800 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
581d6fd6 3801
e875ff84
MH
3802 /* Time stamp */
3803 __net_timestamp(skb);
3804
581d6fd6
MH
3805 skb_queue_tail(&hdev->rx_q, skb);
3806 queue_work(hdev->workqueue, &hdev->rx_work);
e875ff84 3807
e875ff84
MH
3808 return 0;
3809}
3810EXPORT_SYMBOL(hci_recv_diag);
3811
5177a838
MH
3812void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3813{
3814 va_list vargs;
3815
3816 va_start(vargs, fmt);
3817 kfree_const(hdev->hw_info);
3818 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3819 va_end(vargs);
3820}
3821EXPORT_SYMBOL(hci_set_hw_info);
3822
3823void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3824{
3825 va_list vargs;
3826
3827 va_start(vargs, fmt);
3828 kfree_const(hdev->fw_info);
3829 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3830 va_end(vargs);
3831}
3832EXPORT_SYMBOL(hci_set_fw_info);
3833
1da177e4
LT
3834/* ---- Interface to upper protocols ---- */
3835
1da177e4
LT
3836int hci_register_cb(struct hci_cb *cb)
3837{
3838 BT_DBG("%p name %s", cb, cb->name);
3839
fba7ecf0 3840 mutex_lock(&hci_cb_list_lock);
00629e0f 3841 list_add_tail(&cb->list, &hci_cb_list);
fba7ecf0 3842 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3843
3844 return 0;
3845}
3846EXPORT_SYMBOL(hci_register_cb);
3847
3848int hci_unregister_cb(struct hci_cb *cb)
3849{
3850 BT_DBG("%p name %s", cb, cb->name);
3851
fba7ecf0 3852 mutex_lock(&hci_cb_list_lock);
1da177e4 3853 list_del(&cb->list);
fba7ecf0 3854 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3855
3856 return 0;
3857}
3858EXPORT_SYMBOL(hci_unregister_cb);
3859
51086991 3860static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3861{
cdc52faa
MH
3862 int err;
3863
d79f34e3
MH
3864 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3865 skb->len);
1da177e4 3866
cd82e61c
MH
3867 /* Time stamp */
3868 __net_timestamp(skb);
1da177e4 3869
cd82e61c
MH
3870 /* Send copy to monitor */
3871 hci_send_to_monitor(hdev, skb);
3872
3873 if (atomic_read(&hdev->promisc)) {
3874 /* Send copy to the sockets */
470fe1b5 3875 hci_send_to_sock(hdev, skb);
1da177e4
LT
3876 }
3877
3878 /* Get rid of skb owner, prior to sending to the driver. */
3879 skb_orphan(skb);
3880
73d0d3c8
MH
3881 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3882 kfree_skb(skb);
3883 return;
3884 }
3885
cdc52faa
MH
3886 err = hdev->send(hdev, skb);
3887 if (err < 0) {
2064ee33 3888 bt_dev_err(hdev, "sending frame failed (%d)", err);
cdc52faa
MH
3889 kfree_skb(skb);
3890 }
1da177e4
LT
3891}
3892
1ca3a9d0 3893/* Send HCI command */
07dc93dd
JH
3894int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3895 const void *param)
1ca3a9d0
JH
3896{
3897 struct sk_buff *skb;
3898
3899 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3900
3901 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3902 if (!skb) {
2064ee33 3903 bt_dev_err(hdev, "no memory for command");
1ca3a9d0
JH
3904 return -ENOMEM;
3905 }
3906
49c922bb 3907 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
3908 * single-command requests.
3909 */
44d27137 3910 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
11714b3d 3911
1da177e4 3912 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3913 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3914
3915 return 0;
3916}
1da177e4 3917
d6ee6ad7
LP
3918int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3919 const void *param)
3920{
3921 struct sk_buff *skb;
3922
3923 if (hci_opcode_ogf(opcode) != 0x3f) {
3924 /* A controller receiving a command shall respond with either
3925 * a Command Status Event or a Command Complete Event.
3926 * Therefore, all standard HCI commands must be sent via the
3927 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3928 * Some vendors do not comply with this rule for vendor-specific
3929 * commands and do not return any event. We want to support
3930 * unresponded commands for such cases only.
3931 */
3932 bt_dev_err(hdev, "unresponded command not supported");
3933 return -EINVAL;
3934 }
3935
3936 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3937 if (!skb) {
3938 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3939 opcode);
3940 return -ENOMEM;
3941 }
3942
3943 hci_send_frame(hdev, skb);
3944
3945 return 0;
3946}
3947EXPORT_SYMBOL(__hci_cmd_send);
3948
1da177e4 3949/* Get data from the previously sent command */
a9de9248 3950void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3951{
3952 struct hci_command_hdr *hdr;
3953
3954 if (!hdev->sent_cmd)
3955 return NULL;
3956
3957 hdr = (void *) hdev->sent_cmd->data;
3958
a9de9248 3959 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3960 return NULL;
3961
f0e09510 3962 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3963
3964 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3965}
3966
fbef168f
LP
3967/* Send HCI command and wait for command commplete event */
3968struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3969 const void *param, u32 timeout)
3970{
3971 struct sk_buff *skb;
3972
3973 if (!test_bit(HCI_UP, &hdev->flags))
3974 return ERR_PTR(-ENETDOWN);
3975
3976 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3977
b504430c 3978 hci_req_sync_lock(hdev);
fbef168f 3979 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
b504430c 3980 hci_req_sync_unlock(hdev);
fbef168f
LP
3981
3982 return skb;
3983}
3984EXPORT_SYMBOL(hci_cmd_sync);
3985
1da177e4
LT
3986/* Send ACL data */
3987static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3988{
3989 struct hci_acl_hdr *hdr;
3990 int len = skb->len;
3991
badff6d0
ACM
3992 skb_push(skb, HCI_ACL_HDR_SIZE);
3993 skb_reset_transport_header(skb);
9c70220b 3994 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3995 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3996 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3997}
3998
/* Split an outgoing ACL skb into HCI frames and queue them.
 *
 * Each fragment (the skb's frag_list) gets its own ACL header; the
 * first carries the caller's flags, the rest are marked ACL_CONT.
 * All fragments are queued atomically so the TX path never sees a
 * partially queued packet.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	/* AMP controllers address by channel handle, BR/EDR by
	 * connection handle.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments never carry the start flag */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
4060
/* Queue outgoing ACL data on the channel and kick the TX work item;
 * actual transmission happens asynchronously in hci_tx_work().
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
4071
/* Send SCO data */
/* Prepend the SCO header and queue the skb on the connection's data
 * queue; transmission is deferred to hci_tx_work().
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
4092
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */

/* Pick the connection of the given link type with the fewest packets
 * in flight (fairness) and compute its TX quota in *quote from the
 * available controller buffer count divided by the number of eligible
 * connections. Returns NULL (and *quote = 0) when nothing is ready.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Only consider connections of this type with queued data */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL pool.
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			bt_dev_err(hdev, "unknown link type %d", conn->type);
		}

		q = cnt / num;
		*quote = q ? q : 1;	/* always grant at least one packet */
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4155
/* TX timeout handling: disconnect every connection of the given link
 * type that still has unacknowledged packets outstanding, on the
 * assumption that the controller has stalled.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
4176
/* Channel-level scheduler: among all channels of connections of the
 * given link type, pick one whose head skb has the highest priority,
 * breaking ties by fewest packets in flight on the owning connection.
 * Computes the TX quota in *quote like hci_low_sent(). Returns NULL
 * when no channel has queued data.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart fairness
			 * accounting at the new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;	/* always grant at least one packet */
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4258
/* Anti-starvation pass run after a scheduling round: any channel of
 * this link type that got nothing sent this round has the priority of
 * its head skb promoted to HCI_PRIO_MAX - 1 so it competes at the top
 * level next round. Channels that did send have their per-round
 * counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round: reset counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
4308
b71d385a
AE
4309static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4310{
4311 /* Calculate count of blocks used by this packet */
4312 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4313}
4314
/* Fire the link TX timeout handler when the controller has no free
 * buffers (cnt == 0) and nothing has been sent for longer than the
 * ACL TX timeout.
 *
 * NOTE(review): this always checks acl_last_tx and times out
 * ACL_LINK, yet it is also called from hci_sched_le() — LE stalls are
 * judged by the ACL timestamp and kill ACL links; verify against
 * later upstream fixes that pass the link type in.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 4325
/* Schedule SCO */
/* Drain queued SCO packets while controller SCO buffers are available,
 * round-robin over connections via hci_low_sent().
 */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)	/* avoid counter wrap */
				conn->sent = 0;
		}
	}
}
4349
4350static void hci_sched_esco(struct hci_dev *hdev)
4351{
4352 struct hci_conn *conn;
4353 struct sk_buff *skb;
4354 int quote;
4355
4356 BT_DBG("%s", hdev->name);
4357
4358 if (!hci_conn_num(hdev, ESCO_LINK))
4359 return;
4360
4361 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4362 &quote))) {
4363 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4364 BT_DBG("skb %p len %d", skb, skb->len);
4365 hci_send_frame(hdev, skb);
4366
4367 conn->sent++;
4368 if (conn->sent == ~0)
4369 conn->sent = 0;
4370 }
4371 }
4372}
4373
/* Packet-based ACL scheduler: send queued ACL frames while controller
 * packet buffers remain, per-channel in priority order, interleaving
 * pending (e)SCO traffic after every frame. Runs an anti-starvation
 * recalculation if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Something was sent: promote starved channels for next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4415
/* Block-based ACL scheduler (flow control counted in buffer blocks,
 * used by AMP controllers): like hci_sched_acl_pkt() but each frame
 * consumes __get_blocks() blocks from hdev->block_cnt.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP links, BR/EDR controllers ACL */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* Frame does not fit in the remaining blocks */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent: promote starved channels for next round */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4469
6039aa73 4470static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4471{
4472 BT_DBG("%s", hdev->name);
4473
bd1eb66b 4474 /* No ACL link over BR/EDR controller */
ca8bee5d 4475 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
bd1eb66b
AE
4476 return;
4477
4478 /* No AMP link over AMP controller */
4479 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4480 return;
4481
4482 switch (hdev->flow_ctl_mode) {
4483 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4484 hci_sched_acl_pkt(hdev);
4485 break;
4486
4487 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4488 hci_sched_acl_blk(hdev);
4489 break;
4490 }
4491}
4492
/* LE scheduler: like hci_sched_acl_pkt() but draws from the LE buffer
 * pool when the controller has one (le_pkts), otherwise from the
 * shared ACL pool.
 *
 * NOTE(review): the pool selection here keys on hdev->le_pkts while
 * hci_low_sent() keys on hdev->le_mtu for the same decision — confirm
 * the two are always set together. Also __check_timeout() judges the
 * stall by acl_last_tx, not le_last_tx.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt);

	tmp = cnt;	/* remember starting count to detect progress */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Write the remaining budget back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: promote starved channels for next round */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4542
/* TX work item: run every per-type scheduler (unless userspace has
 * exclusive access via HCI_USER_CHANNEL), then flush raw packets.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4563
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
/* Strip the ACL header, look up the connection by handle and hand the
 * payload to L2CAP; unknown handles are logged and the skb dropped.
 * Consumes the skb in all cases.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}
4601
/* SCO data packet */
/* Strip the SCO header, look up the connection by handle and hand the
 * payload to the SCO layer; unknown handles are logged and the skb
 * dropped. Consumes the skb in all cases.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		/* Low two flag bits carry the packet status */
		bt_cb(skb)->sco.pkt_status = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}
4636
9238f36a
JH
4637static bool hci_req_is_complete(struct hci_dev *hdev)
4638{
4639 struct sk_buff *skb;
4640
4641 skb = skb_peek(&hdev->cmd_q);
4642 if (!skb)
4643 return true;
4644
44d27137 4645 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
9238f36a
JH
4646}
4647
/* Re-queue a clone of the last sent command at the head of the command
 * queue (used to recover from spontaneous controller resets). HCI_Reset
 * itself is never resent.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
4669
/* Route a command-complete/status event to the request machinery.
 *
 * Decides whether the event finishes a request and, if so, returns the
 * request's completion callback through *req_complete or
 * *req_complete_skb (whichever form the request registered). On a
 * failed command it also discards the request's remaining queued
 * commands.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Next request starts here: put it back and stop */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4734
/* RX work item: drain the receive queue, mirroring every packet to the
 * monitor (and to sockets in promiscuous mode), then dispatch by
 * packet type to the event/ACL/SCO handlers. Data packets are dropped
 * while in HCI_INIT or when userspace holds HCI_USER_CHANNEL.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * the userspace has exclusive access to device.
		 * When device is HCI_INIT, we still need to process
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		/* NOTE(review): HCI_ISODATA_PKT has no handler here and
		 * falls through to the default drop — confirm against
		 * later versions that add ISO data processing.
		 */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
4797
/* Command work item: when the controller has command credit, dequeue
 * the next command, remember it as sent_cmd (for completion matching
 * and potential resend) and transmit it, arming the command timeout.
 * If cloning fails the command is requeued and the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command (kfree_skb(NULL) is ok) */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* No command timeout while a reset is in flight */
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}