net/bluetooth/hci_core.c — Bluetooth HCI core
[linux-block.git] / net / bluetooth / hci_core.c
CommitLineData (git blame annotation view; commit metadata is interleaved with the source below)
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
99780a7b 32#include <linux/crypto.h>
47219839 33#include <asm/unaligned.h>
1da177e4
LT
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
4bc58f51 37#include <net/bluetooth/l2cap.h>
af58925c 38#include <net/bluetooth/mgmt.h>
1da177e4 39
0857dd3b 40#include "hci_request.h"
60c5f5fb 41#include "hci_debugfs.h"
970c4e46 42#include "smp.h"
6d5d2ee6 43#include "leds.h"
970c4e46 44
b78752cc 45static void hci_rx_work(struct work_struct *work);
c347b765 46static void hci_cmd_work(struct work_struct *work);
3eff45ea 47static void hci_tx_work(struct work_struct *work);
1da177e4 48
1da177e4
LT
49/* HCI device list */
50LIST_HEAD(hci_dev_list);
51DEFINE_RWLOCK(hci_dev_list_lock);
52
53/* HCI callback list */
54LIST_HEAD(hci_cb_list);
fba7ecf0 55DEFINE_MUTEX(hci_cb_list_lock);
1da177e4 56
3df92b31
SL
57/* HCI ID Numbering */
58static DEFINE_IDA(hci_index_ida);
59
baf27f6e
MH
60/* ---- HCI debugfs entries ---- */
61
4b4148e9
MH
62static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
63 size_t count, loff_t *ppos)
64{
65 struct hci_dev *hdev = file->private_data;
66 char buf[3];
67
74b93e9f 68 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
4b4148e9
MH
69 buf[1] = '\n';
70 buf[2] = '\0';
71 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
72}
73
/* debugfs write handler for "dut_mode": accepts a boolean string and
 * toggles Device Under Test mode on a running controller.
 *
 * Returns @count on success, -ENETDOWN if the device is not up,
 * -EALREADY if the requested state is already active, or a negative
 * error from parsing or from the synchronous HCI command.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	/* Enabling uses the dedicated DUT mode command; disabling is done
	 * by resetting the controller. Both run under the request sync
	 * lock to serialize against other synchronous requests.
	 */
	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* The command complete payload is not needed, only success. */
	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}
110
/* File operations for the "dut_mode" debugfs entry. */
static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};
117
4b4113d6
MH
118static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
119 size_t count, loff_t *ppos)
120{
121 struct hci_dev *hdev = file->private_data;
122 char buf[3];
123
74b93e9f 124 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
4b4113d6
MH
125 buf[1] = '\n';
126 buf[2] = '\0';
127 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
128}
129
/* debugfs write handler for "vendor_diag": accepts a boolean string and
 * enables or disables vendor-specific diagnostic messages through the
 * driver's set_diag callback.
 *
 * Returns @count on success or a negative error from parsing or from
 * the driver callback.
 */
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active or in user channel operation, then there is no need
	 * for the vendor callback. Instead just store the desired value and
	 * the setting will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	/* Track the (possibly deferred) setting in the device flags. */
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}
166
/* File operations for the "vendor_diag" debugfs entry. */
static const struct file_operations vendor_diag_fops = {
	.open = simple_open,
	.read = vendor_diag_read,
	.write = vendor_diag_write,
	.llseek = default_llseek,
};
173
f640ee98
MH
/* Create the basic debugfs entries available for every controller:
 * "dut_mode" always, and "vendor_diag" only when the driver provides
 * a set_diag callback.
 */
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}
183
/* Request builder: queue an HCI_Reset command and mark the device as
 * resetting. Always returns 0.
 */
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
	return 0;
}
193
/* Stage-one init for BR/EDR (primary) controllers: select packet based
 * flow control and read the basic controller identity.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
207
/* Stage-one init for AMP controllers: select block based flow control
 * and read the AMP-specific controller information.
 */
static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
230
/* Stage-two init for AMP controllers. Always returns 0. */
static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}
242
/* First init stage: optional reset, then the device-type specific
 * identity reads (BR/EDR vs AMP). Always returns 0.
 */
static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}
267
/* Stage-two setup for BR/EDR capable controllers: read buffer sizes and
 * identity, clear event filters and set the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
299
/* Stage-two setup for LE capable controllers: read LE buffer sizes,
 * features and supported states.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
317
/* Build and queue the Set Event Mask command, enabling only the events
 * that match the controller's advertised LMP features and supported
 * commands. BR/EDR and LE-only controllers start from different
 * defaults.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
413
/* Second init stage: transport-specific setup (BR/EDR and/or LE), then
 * feature-gated reads and writes common to primary controllers. AMP
 * controllers branch off to amp_init2(). Always returns 0.
 */
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stale EIR data. */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}
497
/* Build the default link policy from the controller's LMP capabilities
 * (role switch, hold, sniff, park) and queue Write Default Link Policy.
 */
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
516
/* Queue Write LE Host Supported when the host's HCI_LE_ENABLED flag
 * disagrees with the controller's current host LE setting. Skipped on
 * LE-only controllers where LE support is implicit.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	/* Only send the command if the setting actually changes. */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
537
d62e6d67
JH
538static void hci_set_event_mask_page_2(struct hci_request *req)
539{
540 struct hci_dev *hdev = req->hdev;
541 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
313f6888 542 bool changed = false;
d62e6d67
JH
543
544 /* If Connectionless Slave Broadcast master role is supported
545 * enable all necessary events for it.
546 */
53b834d2 547 if (lmp_csb_master_capable(hdev)) {
d62e6d67
JH
548 events[1] |= 0x40; /* Triggered Clock Capture */
549 events[1] |= 0x80; /* Synchronization Train Complete */
550 events[2] |= 0x10; /* Slave Page Response Timeout */
551 events[2] |= 0x20; /* CSB Channel Map Change */
313f6888 552 changed = true;
d62e6d67
JH
553 }
554
555 /* If Connectionless Slave Broadcast slave role is supported
556 * enable all necessary events for it.
557 */
53b834d2 558 if (lmp_csb_slave_capable(hdev)) {
d62e6d67
JH
559 events[2] |= 0x01; /* Synchronization Train Received */
560 events[2] |= 0x02; /* CSB Receive */
561 events[2] |= 0x04; /* CSB Timeout */
562 events[2] |= 0x08; /* Truncated Page Complete */
313f6888 563 changed = true;
d62e6d67
JH
564 }
565
40c59fcb 566 /* Enable Authenticated Payload Timeout Expired event if supported */
313f6888 567 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
40c59fcb 568 events[2] |= 0x80;
313f6888
MH
569 changed = true;
570 }
40c59fcb 571
313f6888
MH
572 /* Some Broadcom based controllers indicate support for Set Event
573 * Mask Page 2 command, but then actually do not support it. Since
574 * the default value is all bits set to zero, the command is only
575 * required if the event mask has to be changed. In case no change
576 * to the event mask is needed, skip this command.
577 */
578 if (changed)
579 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
580 sizeof(events), events);
d62e6d67
JH
581}
582
/* Third init stage (primary controllers only): program the event masks,
 * link policy, page scan reads, the full LE event mask and LE list
 * sizes, then read extended feature pages beyond page 1. Each command
 * is gated on the corresponding supported-commands or LE feature bit.
 * Always returns 0.
 */
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40; /* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04; /* LE Direct Advertising
					    * Report
					    */

		/* If the controller supports Channel Selection Algorithm #2
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
			events[2] |= 0x08; /* LE Channel Selection
					    * Algorithm
					    */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02; /* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01; /* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04; /* LE Connection Update
					    * Complete
					    */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08; /* LE Read Remote Used
					    * Features Complete
					    */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80; /* LE Read Local P-256
					    * Public Key Complete
					    */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01; /* LE Generate DHKey Complete */

		/* If the controller supports the LE Set Default PHY or
		 * LE Set PHY commands, enable the corresponding event.
		 */
		if (hdev->commands[35] & (0x20 | 0x40))
			events[1] |= 0x08; /* LE PHY Update Complete */

		/* If the controller supports LE Set Extended Scan Parameters
		 * and LE Set Extended Scan Enable commands, enable the
		 * corresponding event.
		 */
		if (use_ext_scan(hdev))
			events[1] |= 0x10; /* LE Extended Advertising
					    * Report
					    */

		/* If the controller supports the LE Extended Create Connection
		 * command, enable the corresponding event.
		 */
		if (use_ext_conn(hdev))
			events[1] |= 0x02; /* LE Enhanced Connection
					    * Complete
					    */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->commands[34] & 0x40) {
			/* Read LE Resolving List Size */
			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[34] & 0x20) {
			/* Clear LE Resolving List */
			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}
767
/* Fourth init stage: quirk-aware stored link key deletion, event mask
 * page 2, codec/MWS/sync-train reads, Secure Connections enablement and
 * LE data length / default PHY configuration. Always returns 0.
 */
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = hdev->le_max_tx_len;
		cp.tx_time = hdev->le_max_tx_time;
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	/* Set Default PHY parameters if command is supported */
	if (hdev->commands[35] & 0x20) {
		struct hci_cp_le_set_default_phy cp;

		/* No transmitter PHY or receiver PHY preferences */
		cp.all_phys = 0x03;
		cp.tx_phys = 0;
		cp.rx_phys = 0;

		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
	}

	return 0;
}
843
2177bab5
JH
/* Run the full synchronous initialization sequence for a configured
 * controller: stages 1-2 for all device types, stages 3-4 plus debugfs
 * setup for primary controllers only.
 *
 * Returns 0 on success or a negative error from the first failing
 * request stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
900
/* Minimal init request used for unconfigured controllers: optional
 * reset, version read, and BD address read when the driver can set one.
 * Always returns 0.
 */
static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}
920
/* Run the minimal init sequence for an unconfigured controller, unless
 * it is a raw device. Returns 0 on success or a negative error from
 * the synchronous request.
 */
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}
937
a1d01db1 938static int hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
939{
940 __u8 scan = opt;
941
42c6b129 942 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
943
944 /* Inquiry and Page scans */
42c6b129 945 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
a1d01db1 946 return 0;
1da177e4
LT
947}
948
a1d01db1 949static int hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
950{
951 __u8 auth = opt;
952
42c6b129 953 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
954
955 /* Authentication */
42c6b129 956 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
a1d01db1 957 return 0;
1da177e4
LT
958}
959
a1d01db1 960static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
961{
962 __u8 encrypt = opt;
963
42c6b129 964 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 965
e4e8e37c 966 /* Encryption */
42c6b129 967 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
a1d01db1 968 return 0;
1da177e4
LT
969}
970
a1d01db1 971static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
972{
973 __le16 policy = cpu_to_le16(opt);
974
42c6b129 975 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
976
977 /* Default link policy */
42c6b129 978 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
a1d01db1 979 return 0;
e4e8e37c
MH
980}
981
8e87d142 982/* Get HCI device by index.
1da177e4
LT
983 * Device is held on return. */
984struct hci_dev *hci_dev_get(int index)
985{
8035ded4 986 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
987
988 BT_DBG("%d", index);
989
990 if (index < 0)
991 return NULL;
992
993 read_lock(&hci_dev_list_lock);
8035ded4 994 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
995 if (d->id == index) {
996 hdev = hci_dev_hold(d);
997 break;
998 }
999 }
1000 read_unlock(&hci_dev_list_lock);
1001 return hdev;
1002}
1da177e4
LT
1003
1004/* ---- Inquiry support ---- */
ff9ef578 1005
30dc78e1
JH
1006bool hci_discovery_active(struct hci_dev *hdev)
1007{
1008 struct discovery_state *discov = &hdev->discovery;
1009
6fbe195d 1010 switch (discov->state) {
343f935b 1011 case DISCOVERY_FINDING:
6fbe195d 1012 case DISCOVERY_RESOLVING:
30dc78e1
JH
1013 return true;
1014
6fbe195d
AG
1015 default:
1016 return false;
1017 }
30dc78e1
JH
1018}
1019
ff9ef578
JH
/* Transition the discovery state machine to @state and emit the side
 * effects tied to the transition: background-scan re-evaluation when
 * stopping, and mgmt "discovering" events on the STOPPED/FINDING edges.
 * A transition to the current state is a no-op.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* Ignore redundant transitions */
	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		/* Only report "discovery stopped" if discovery actually
		 * got past the STARTING phase.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
1049
1f9b9a5d 1050void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1051{
30883512 1052 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1053 struct inquiry_entry *p, *n;
1da177e4 1054
561aafbc
JH
1055 list_for_each_entry_safe(p, n, &cache->all, all) {
1056 list_del(&p->all);
b57c1a56 1057 kfree(p);
1da177e4 1058 }
561aafbc
JH
1059
1060 INIT_LIST_HEAD(&cache->unknown);
1061 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1062}
1063
a8c5fb1a
GP
1064struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1065 bdaddr_t *bdaddr)
1da177e4 1066{
30883512 1067 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1068 struct inquiry_entry *e;
1069
6ed93dc6 1070 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1071
561aafbc
JH
1072 list_for_each_entry(e, &cache->all, all) {
1073 if (!bacmp(&e->data.bdaddr, bdaddr))
1074 return e;
1075 }
1076
1077 return NULL;
1078}
1079
1080struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1081 bdaddr_t *bdaddr)
561aafbc 1082{
30883512 1083 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1084 struct inquiry_entry *e;
1085
6ed93dc6 1086 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1087
1088 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1089 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1090 return e;
1091 }
1092
1093 return NULL;
1da177e4
LT
1094}
1095
30dc78e1 1096struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1097 bdaddr_t *bdaddr,
1098 int state)
30dc78e1
JH
1099{
1100 struct discovery_state *cache = &hdev->discovery;
1101 struct inquiry_entry *e;
1102
6ed93dc6 1103 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1104
1105 list_for_each_entry(e, &cache->resolve, list) {
1106 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1107 return e;
1108 if (!bacmp(&e->data.bdaddr, bdaddr))
1109 return e;
1110 }
1111
1112 return NULL;
1113}
1114
/* Re-insert @ie into the resolve list so the list stays ordered by
 * descending RSSI magnitude, while entries whose name resolution is
 * already pending keep their position at the front.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Remove first; it gets re-inserted at its sorted position */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		/* Stop before the first non-pending entry with an equal
		 * or stronger (larger magnitude) RSSI.
		 */
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	/* Insert after the last entry we walked past */
	list_add(&ie->list, pos);
}
1133
af58925c
MH
1134u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1135 bool name_known)
1da177e4 1136{
30883512 1137 struct discovery_state *cache = &hdev->discovery;
70f23020 1138 struct inquiry_entry *ie;
af58925c 1139 u32 flags = 0;
1da177e4 1140
6ed93dc6 1141 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 1142
6928a924 1143 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
2b2fec4d 1144
af58925c
MH
1145 if (!data->ssp_mode)
1146 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 1147
70f23020 1148 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 1149 if (ie) {
af58925c
MH
1150 if (!ie->data.ssp_mode)
1151 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 1152
a3d4e20a 1153 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 1154 data->rssi != ie->data.rssi) {
a3d4e20a
JH
1155 ie->data.rssi = data->rssi;
1156 hci_inquiry_cache_update_resolve(hdev, ie);
1157 }
1158
561aafbc 1159 goto update;
a3d4e20a 1160 }
561aafbc
JH
1161
1162 /* Entry not in the cache. Add new one. */
27f70f3e 1163 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
af58925c
MH
1164 if (!ie) {
1165 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1166 goto done;
1167 }
561aafbc
JH
1168
1169 list_add(&ie->all, &cache->all);
1170
1171 if (name_known) {
1172 ie->name_state = NAME_KNOWN;
1173 } else {
1174 ie->name_state = NAME_NOT_KNOWN;
1175 list_add(&ie->list, &cache->unknown);
1176 }
70f23020 1177
561aafbc
JH
1178update:
1179 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 1180 ie->name_state != NAME_PENDING) {
561aafbc
JH
1181 ie->name_state = NAME_KNOWN;
1182 list_del(&ie->list);
1da177e4
LT
1183 }
1184
70f23020
AE
1185 memcpy(&ie->data, data, sizeof(*data));
1186 ie->timestamp = jiffies;
1da177e4 1187 cache->timestamp = jiffies;
3175405b
JH
1188
1189 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 1190 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 1191
af58925c
MH
1192done:
1193 return flags;
1da177e4
LT
1194}
1195
1196static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1197{
30883512 1198 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1199 struct inquiry_info *info = (struct inquiry_info *) buf;
1200 struct inquiry_entry *e;
1201 int copied = 0;
1202
561aafbc 1203 list_for_each_entry(e, &cache->all, all) {
1da177e4 1204 struct inquiry_data *data = &e->data;
b57c1a56
JH
1205
1206 if (copied >= num)
1207 break;
1208
1da177e4
LT
1209 bacpy(&info->bdaddr, &data->bdaddr);
1210 info->pscan_rep_mode = data->pscan_rep_mode;
1211 info->pscan_period_mode = data->pscan_period_mode;
1212 info->pscan_mode = data->pscan_mode;
1213 memcpy(info->dev_class, data->dev_class, 3);
1214 info->clock_offset = data->clock_offset;
b57c1a56 1215
1da177e4 1216 info++;
b57c1a56 1217 copied++;
1da177e4
LT
1218 }
1219
1220 BT_DBG("cache %p, copied %d", cache, copied);
1221 return copied;
1222}
1223
/* Request builder: queue an HCI Inquiry command built from the
 * hci_inquiry_req passed (by pointer) through @opt. Does nothing if an
 * inquiry is already running on the controller.
 */
static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	/* An inquiry is already in progress; don't start another */
	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
1243
1244int hci_inquiry(void __user *arg)
1245{
1246 __u8 __user *ptr = arg;
1247 struct hci_inquiry_req ir;
1248 struct hci_dev *hdev;
1249 int err = 0, do_inquiry = 0, max_rsp;
1250 long timeo;
1251 __u8 *buf;
1252
1253 if (copy_from_user(&ir, ptr, sizeof(ir)))
1254 return -EFAULT;
1255
5a08ecce
AE
1256 hdev = hci_dev_get(ir.dev_id);
1257 if (!hdev)
1da177e4
LT
1258 return -ENODEV;
1259
d7a5a11d 1260 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1261 err = -EBUSY;
1262 goto done;
1263 }
1264
d7a5a11d 1265 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1266 err = -EOPNOTSUPP;
1267 goto done;
1268 }
1269
ca8bee5d 1270 if (hdev->dev_type != HCI_PRIMARY) {
5b69bef5
MH
1271 err = -EOPNOTSUPP;
1272 goto done;
1273 }
1274
d7a5a11d 1275 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
1276 err = -EOPNOTSUPP;
1277 goto done;
1278 }
1279
09fd0de5 1280 hci_dev_lock(hdev);
8e87d142 1281 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1282 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1283 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1284 do_inquiry = 1;
1285 }
09fd0de5 1286 hci_dev_unlock(hdev);
1da177e4 1287
04837f64 1288 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1289
1290 if (do_inquiry) {
01178cd4 1291 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
4ebeee2d 1292 timeo, NULL);
70f23020
AE
1293 if (err < 0)
1294 goto done;
3e13fa1e
AG
1295
1296 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1297 * cleared). If it is interrupted by a signal, return -EINTR.
1298 */
74316201 1299 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
3e13fa1e
AG
1300 TASK_INTERRUPTIBLE))
1301 return -EINTR;
70f23020 1302 }
1da177e4 1303
8fc9ced3
GP
1304 /* for unlimited number of responses we will use buffer with
1305 * 255 entries
1306 */
1da177e4
LT
1307 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1308
1309 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1310 * copy it to the user space.
1311 */
6da2ec56 1312 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
70f23020 1313 if (!buf) {
1da177e4
LT
1314 err = -ENOMEM;
1315 goto done;
1316 }
1317
09fd0de5 1318 hci_dev_lock(hdev);
1da177e4 1319 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1320 hci_dev_unlock(hdev);
1da177e4
LT
1321
1322 BT_DBG("num_rsp %d", ir.num_rsp);
1323
1324 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1325 ptr += sizeof(ir);
1326 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1327 ir.num_rsp))
1da177e4 1328 err = -EFAULT;
8e87d142 1329 } else
1da177e4
LT
1330 err = -EFAULT;
1331
1332 kfree(buf);
1333
1334done:
1335 hci_dev_put(hdev);
1336 return err;
1337}
1338
/* Core power-on path shared by the ioctl and mgmt interfaces: open the
 * transport, run the driver setup/config callbacks and the HCI init
 * sequence, and either mark the device up or fully unwind on failure.
 * Serialized by the request sync lock.
 *
 * Returns 0 on success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport (driver callback) */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		/* Full HCI init only for configured, non-user-channel use */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		/* Notify mgmt of the power-on for fully configured,
		 * mgmt-controlled primary controllers.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		/* Keep only the raw flag across the failed open */
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}
1503
cbed0ca1
JH
1504/* ---- HCI ioctl helpers ---- */
1505
/* HCIDEVUP ioctl handler: validate that the device may be powered up
 * through the legacy interface, settle any pending power work, then
 * delegate to hci_dev_do_open().
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1560
d7347f3c
JH
/* This function requires the caller holds hdev->lock */
/* Drop the connection reference held by every LE connection parameter
 * entry and remove each entry from its pending-action list.
 */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			/* Release both the connection and its refcount */
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}
1577
/* Core power-off path: stop all pending work, flush queues and caches,
 * notify mgmt, optionally reset the controller, and close the driver
 * transport. The teardown order matters — works are flushed before
 * queues are purged, and the last reference taken at power-on is
 * dropped at the end.
 *
 * Returns 0 (also when the device was already down).
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	/* Nothing to do if the device was not up */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	/* Only signal mgmt power-off for user-initiated shutdowns of
	 * mgmt-controlled primary controllers.
	 */
	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	/* Balances the hci_dev_hold() done in hci_dev_do_open() */
	hci_dev_put(hdev);
	return 0;
}
1697
1698int hci_dev_close(__u16 dev)
1699{
1700 struct hci_dev *hdev;
1701 int err;
1702
70f23020
AE
1703 hdev = hci_dev_get(dev);
1704 if (!hdev)
1da177e4 1705 return -ENODEV;
8ee56540 1706
d7a5a11d 1707 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1708 err = -EBUSY;
1709 goto done;
1710 }
1711
a69d8927 1712 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
8ee56540
MH
1713 cancel_delayed_work(&hdev->power_off);
1714
1da177e4 1715 err = hci_dev_do_close(hdev);
8ee56540 1716
0736cfa8 1717done:
1da177e4
LT
1718 hci_dev_put(hdev);
1719 return err;
1720}
1721
/* Core reset path: purge queues, flush caches and connections, then
 * issue a synchronous HCI Reset. Serialized by the request sync lock.
 *
 * Returns 0 on success or the negative error of the reset request.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart command processing with fresh flow-control counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}
1755
5c912495
MH
1756int hci_dev_reset(__u16 dev)
1757{
1758 struct hci_dev *hdev;
1759 int err;
1760
1761 hdev = hci_dev_get(dev);
1762 if (!hdev)
1763 return -ENODEV;
1764
1765 if (!test_bit(HCI_UP, &hdev->flags)) {
1766 err = -ENETDOWN;
1767 goto done;
1768 }
1769
d7a5a11d 1770 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5c912495
MH
1771 err = -EBUSY;
1772 goto done;
1773 }
1774
d7a5a11d 1775 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5c912495
MH
1776 err = -EOPNOTSUPP;
1777 goto done;
1778 }
1779
1780 err = hci_dev_do_reset(hdev);
1781
1782done:
1783 hci_dev_put(hdev);
1784 return err;
1785}
1786
1da177e4
LT
1787int hci_dev_reset_stat(__u16 dev)
1788{
1789 struct hci_dev *hdev;
1790 int ret = 0;
1791
70f23020
AE
1792 hdev = hci_dev_get(dev);
1793 if (!hdev)
1da177e4
LT
1794 return -ENODEV;
1795
d7a5a11d 1796 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1797 ret = -EBUSY;
1798 goto done;
1799 }
1800
d7a5a11d 1801 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1802 ret = -EOPNOTSUPP;
1803 goto done;
1804 }
1805
1da177e4
LT
1806 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1807
0736cfa8 1808done:
1da177e4 1809 hci_dev_put(hdev);
1da177e4
LT
1810 return ret;
1811}
1812
123abc08
JH
/* Mirror a scan-enable change made outside of mgmt (HCISETSCAN ioctl)
 * into the CONNECTABLE/DISCOVERABLE flags, and notify mgmt with a New
 * Settings event when anything actually changed.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	/* Page scan maps to the connectable setting */
	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	/* Inquiry scan maps to the discoverable setting */
	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	/* Without mgmt in use there is nobody to notify */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}
1848
1da177e4
LT
/* Handler for the legacy HCISET* device ioctls: copy the request from
 * userspace, validate the device state, then dispatch on the command.
 * Some commands issue synchronous HCI requests; others just update
 * fields on the hci_dev.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high half, packet count low */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1950
/* HCIGETDEVLIST ioctl handler: fill a userspace-supplied list with the
 * id and flags of up to dev_num registered devices.
 *
 * Returns 0 on success, -EFAULT on copy errors, -EINVAL for a zero or
 * oversized request, -ENOMEM on allocation failure.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the header plus the entries actually filled */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2000
2001int hci_get_dev_info(void __user *arg)
2002{
2003 struct hci_dev *hdev;
2004 struct hci_dev_info di;
2e84d8db 2005 unsigned long flags;
1da177e4
LT
2006 int err = 0;
2007
2008 if (copy_from_user(&di, arg, sizeof(di)))
2009 return -EFAULT;
2010
70f23020
AE
2011 hdev = hci_dev_get(di.dev_id);
2012 if (!hdev)
1da177e4
LT
2013 return -ENODEV;
2014
2e84d8db
MH
2015 /* When the auto-off is configured it means the transport
2016 * is running, but in that case still indicate that the
2017 * device is actually down.
2018 */
d7a5a11d 2019 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db
MH
2020 flags = hdev->flags & ~BIT(HCI_UP);
2021 else
2022 flags = hdev->flags;
c542a06c 2023
1da177e4
LT
2024 strcpy(di.name, hdev->name);
2025 di.bdaddr = hdev->bdaddr;
60f2a3ed 2026 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 2027 di.flags = flags;
1da177e4 2028 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2029 if (lmp_bredr_capable(hdev)) {
2030 di.acl_mtu = hdev->acl_mtu;
2031 di.acl_pkts = hdev->acl_pkts;
2032 di.sco_mtu = hdev->sco_mtu;
2033 di.sco_pkts = hdev->sco_pkts;
2034 } else {
2035 di.acl_mtu = hdev->le_mtu;
2036 di.acl_pkts = hdev->le_pkts;
2037 di.sco_mtu = 0;
2038 di.sco_pkts = 0;
2039 }
1da177e4
LT
2040 di.link_policy = hdev->link_policy;
2041 di.link_mode = hdev->link_mode;
2042
2043 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2044 memcpy(&di.features, &hdev->features, sizeof(di.features));
2045
2046 if (copy_to_user(arg, &di, sizeof(di)))
2047 err = -EFAULT;
2048
2049 hci_dev_put(hdev);
2050
2051 return err;
2052}
2053
2054/* ---- Interface to HCI drivers ---- */
2055
611b30f7
MH
2056static int hci_rfkill_set_block(void *data, bool blocked)
2057{
2058 struct hci_dev *hdev = data;
2059
2060 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2061
d7a5a11d 2062 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
0736cfa8
MH
2063 return -EBUSY;
2064
5e130367 2065 if (blocked) {
a1536da2 2066 hci_dev_set_flag(hdev, HCI_RFKILLED);
d7a5a11d
MH
2067 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2068 !hci_dev_test_flag(hdev, HCI_CONFIG))
bf543036 2069 hci_dev_do_close(hdev);
5e130367 2070 } else {
a358dc11 2071 hci_dev_clear_flag(hdev, HCI_RFKILLED);
1025c04c 2072 }
611b30f7
MH
2073
2074 return 0;
2075}
2076
2077static const struct rfkill_ops hci_rfkill_ops = {
2078 .set_block = hci_rfkill_set_block,
2079};
2080
ab81cbf9
JH
2081static void hci_power_on(struct work_struct *work)
2082{
2083 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2084 int err;
ab81cbf9
JH
2085
2086 BT_DBG("%s", hdev->name);
2087
2ff13894
JH
2088 if (test_bit(HCI_UP, &hdev->flags) &&
2089 hci_dev_test_flag(hdev, HCI_MGMT) &&
2090 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
d82142a8 2091 cancel_delayed_work(&hdev->power_off);
2ff13894
JH
2092 hci_req_sync_lock(hdev);
2093 err = __hci_req_hci_power_on(hdev);
2094 hci_req_sync_unlock(hdev);
2095 mgmt_power_on(hdev, err);
2096 return;
2097 }
2098
cbed0ca1 2099 err = hci_dev_do_open(hdev);
96570ffc 2100 if (err < 0) {
3ad67582 2101 hci_dev_lock(hdev);
96570ffc 2102 mgmt_set_powered_failed(hdev, err);
3ad67582 2103 hci_dev_unlock(hdev);
ab81cbf9 2104 return;
96570ffc 2105 }
ab81cbf9 2106
a5c8f270
MH
2107 /* During the HCI setup phase, a few error conditions are
2108 * ignored and they need to be checked now. If they are still
2109 * valid, it is important to turn the device back off.
2110 */
d7a5a11d
MH
2111 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2112 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
ca8bee5d 2113 (hdev->dev_type == HCI_PRIMARY &&
a5c8f270
MH
2114 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2115 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
a358dc11 2116 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
bf543036 2117 hci_dev_do_close(hdev);
d7a5a11d 2118 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
19202573
JH
2119 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2120 HCI_AUTO_OFF_TIMEOUT);
bf543036 2121 }
ab81cbf9 2122
a69d8927 2123 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
4a964404
MH
2124 /* For unconfigured devices, set the HCI_RAW flag
2125 * so that userspace can easily identify them.
4a964404 2126 */
d7a5a11d 2127 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4a964404 2128 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
2129
2130 /* For fully configured devices, this will send
2131 * the Index Added event. For unconfigured devices,
2132 * it will send Unconfigued Index Added event.
2133 *
2134 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2135 * and no event will be send.
2136 */
2137 mgmt_index_added(hdev);
a69d8927 2138 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5ea234d3
MH
2139 /* When the controller is now configured, then it
2140 * is important to clear the HCI_RAW flag.
2141 */
d7a5a11d 2142 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5ea234d3
MH
2143 clear_bit(HCI_RAW, &hdev->flags);
2144
d603b76b
MH
2145 /* Powering on the controller with HCI_CONFIG set only
2146 * happens with the transition from unconfigured to
2147 * configured. This will send the Index Added event.
2148 */
744cf19e 2149 mgmt_index_added(hdev);
fee746b0 2150 }
ab81cbf9
JH
2151}
2152
2153static void hci_power_off(struct work_struct *work)
2154{
3243553f 2155 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2156 power_off.work);
ab81cbf9
JH
2157
2158 BT_DBG("%s", hdev->name);
2159
8ee56540 2160 hci_dev_do_close(hdev);
ab81cbf9
JH
2161}
2162
c7741d16
MH
2163static void hci_error_reset(struct work_struct *work)
2164{
2165 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2166
2167 BT_DBG("%s", hdev->name);
2168
2169 if (hdev->hw_error)
2170 hdev->hw_error(hdev, hdev->hw_error_code);
2171 else
2064ee33 2172 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
c7741d16
MH
2173
2174 if (hci_dev_do_close(hdev))
2175 return;
2176
c7741d16
MH
2177 hci_dev_do_open(hdev);
2178}
2179
35f7498a 2180void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2181{
4821002c 2182 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2183
4821002c
JH
2184 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2185 list_del(&uuid->list);
2aeb9a1a
JH
2186 kfree(uuid);
2187 }
2aeb9a1a
JH
2188}
2189
35f7498a 2190void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1 2191{
0378b597 2192 struct link_key *key;
55ed8ca1 2193
0378b597
JH
2194 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2195 list_del_rcu(&key->list);
2196 kfree_rcu(key, rcu);
55ed8ca1 2197 }
55ed8ca1
JH
2198}
2199
35f7498a 2200void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 2201{
970d0f1b 2202 struct smp_ltk *k;
b899efaf 2203
970d0f1b
JH
2204 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2205 list_del_rcu(&k->list);
2206 kfree_rcu(k, rcu);
b899efaf 2207 }
b899efaf
VCG
2208}
2209
970c4e46
JH
2210void hci_smp_irks_clear(struct hci_dev *hdev)
2211{
adae20cb 2212 struct smp_irk *k;
970c4e46 2213
adae20cb
JH
2214 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2215 list_del_rcu(&k->list);
2216 kfree_rcu(k, rcu);
970c4e46
JH
2217 }
2218}
2219
55ed8ca1
JH
2220struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2221{
8035ded4 2222 struct link_key *k;
55ed8ca1 2223
0378b597
JH
2224 rcu_read_lock();
2225 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2226 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2227 rcu_read_unlock();
55ed8ca1 2228 return k;
0378b597
JH
2229 }
2230 }
2231 rcu_read_unlock();
55ed8ca1
JH
2232
2233 return NULL;
2234}
2235
745c0ce3 2236static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2237 u8 key_type, u8 old_key_type)
d25e28ab
JH
2238{
2239 /* Legacy key */
2240 if (key_type < 0x03)
745c0ce3 2241 return true;
d25e28ab
JH
2242
2243 /* Debug keys are insecure so don't store them persistently */
2244 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2245 return false;
d25e28ab
JH
2246
2247 /* Changed combination key and there's no previous one */
2248 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2249 return false;
d25e28ab
JH
2250
2251 /* Security mode 3 case */
2252 if (!conn)
745c0ce3 2253 return true;
d25e28ab 2254
e3befab9
JH
2255 /* BR/EDR key derived using SC from an LE link */
2256 if (conn->type == LE_LINK)
2257 return true;
2258
d25e28ab
JH
2259 /* Neither local nor remote side had no-bonding as requirement */
2260 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2261 return true;
d25e28ab
JH
2262
2263 /* Local side had dedicated bonding as requirement */
2264 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2265 return true;
d25e28ab
JH
2266
2267 /* Remote side had dedicated bonding as requirement */
2268 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2269 return true;
d25e28ab
JH
2270
2271 /* If none of the above criteria match, then don't store the key
2272 * persistently */
745c0ce3 2273 return false;
d25e28ab
JH
2274}
2275
e804d25d 2276static u8 ltk_role(u8 type)
98a0b845 2277{
e804d25d
JH
2278 if (type == SMP_LTK)
2279 return HCI_ROLE_MASTER;
98a0b845 2280
e804d25d 2281 return HCI_ROLE_SLAVE;
98a0b845
JH
2282}
2283
f3a73d97
JH
2284struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2285 u8 addr_type, u8 role)
75d262c2 2286{
c9839a11 2287 struct smp_ltk *k;
75d262c2 2288
970d0f1b
JH
2289 rcu_read_lock();
2290 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
5378bc56
JH
2291 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2292 continue;
2293
923e2414 2294 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
970d0f1b 2295 rcu_read_unlock();
75d262c2 2296 return k;
970d0f1b
JH
2297 }
2298 }
2299 rcu_read_unlock();
75d262c2
VCG
2300
2301 return NULL;
2302}
75d262c2 2303
970c4e46
JH
2304struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2305{
2306 struct smp_irk *irk;
2307
adae20cb
JH
2308 rcu_read_lock();
2309 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2310 if (!bacmp(&irk->rpa, rpa)) {
2311 rcu_read_unlock();
970c4e46 2312 return irk;
adae20cb 2313 }
970c4e46
JH
2314 }
2315
adae20cb 2316 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 2317 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 2318 bacpy(&irk->rpa, rpa);
adae20cb 2319 rcu_read_unlock();
970c4e46
JH
2320 return irk;
2321 }
2322 }
adae20cb 2323 rcu_read_unlock();
970c4e46
JH
2324
2325 return NULL;
2326}
2327
2328struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2329 u8 addr_type)
2330{
2331 struct smp_irk *irk;
2332
6cfc9988
JH
2333 /* Identity Address must be public or static random */
2334 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2335 return NULL;
2336
adae20cb
JH
2337 rcu_read_lock();
2338 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 2339 if (addr_type == irk->addr_type &&
adae20cb
JH
2340 bacmp(bdaddr, &irk->bdaddr) == 0) {
2341 rcu_read_unlock();
970c4e46 2342 return irk;
adae20cb 2343 }
970c4e46 2344 }
adae20cb 2345 rcu_read_unlock();
970c4e46
JH
2346
2347 return NULL;
2348}
2349
567fa2aa 2350struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
2351 bdaddr_t *bdaddr, u8 *val, u8 type,
2352 u8 pin_len, bool *persistent)
55ed8ca1
JH
2353{
2354 struct link_key *key, *old_key;
745c0ce3 2355 u8 old_key_type;
55ed8ca1
JH
2356
2357 old_key = hci_find_link_key(hdev, bdaddr);
2358 if (old_key) {
2359 old_key_type = old_key->type;
2360 key = old_key;
2361 } else {
12adcf3a 2362 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2363 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 2364 if (!key)
567fa2aa 2365 return NULL;
0378b597 2366 list_add_rcu(&key->list, &hdev->link_keys);
55ed8ca1
JH
2367 }
2368
6ed93dc6 2369 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2370
d25e28ab
JH
2371 /* Some buggy controller combinations generate a changed
2372 * combination key for legacy pairing even when there's no
2373 * previous key */
2374 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2375 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2376 type = HCI_LK_COMBINATION;
655fe6ec
JH
2377 if (conn)
2378 conn->key_type = type;
2379 }
d25e28ab 2380
55ed8ca1 2381 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2382 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2383 key->pin_len = pin_len;
2384
b6020ba0 2385 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2386 key->type = old_key_type;
4748fed2
JH
2387 else
2388 key->type = type;
2389
7652ff6a
JH
2390 if (persistent)
2391 *persistent = hci_persistent_key(hdev, conn, type,
2392 old_key_type);
4df378a1 2393
567fa2aa 2394 return key;
55ed8ca1
JH
2395}
2396
ca9142b8 2397struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 2398 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 2399 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 2400{
c9839a11 2401 struct smp_ltk *key, *old_key;
e804d25d 2402 u8 role = ltk_role(type);
75d262c2 2403
f3a73d97 2404 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
c9839a11 2405 if (old_key)
75d262c2 2406 key = old_key;
c9839a11 2407 else {
0a14ab41 2408 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2409 if (!key)
ca9142b8 2410 return NULL;
970d0f1b 2411 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2412 }
2413
75d262c2 2414 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2415 key->bdaddr_type = addr_type;
2416 memcpy(key->val, tk, sizeof(key->val));
2417 key->authenticated = authenticated;
2418 key->ediv = ediv;
fe39c7b2 2419 key->rand = rand;
c9839a11
VCG
2420 key->enc_size = enc_size;
2421 key->type = type;
75d262c2 2422
ca9142b8 2423 return key;
75d262c2
VCG
2424}
2425
ca9142b8
JH
2426struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2427 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
2428{
2429 struct smp_irk *irk;
2430
2431 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2432 if (!irk) {
2433 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2434 if (!irk)
ca9142b8 2435 return NULL;
970c4e46
JH
2436
2437 bacpy(&irk->bdaddr, bdaddr);
2438 irk->addr_type = addr_type;
2439
adae20cb 2440 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
2441 }
2442
2443 memcpy(irk->val, val, 16);
2444 bacpy(&irk->rpa, rpa);
2445
ca9142b8 2446 return irk;
970c4e46
JH
2447}
2448
55ed8ca1
JH
2449int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2450{
2451 struct link_key *key;
2452
2453 key = hci_find_link_key(hdev, bdaddr);
2454 if (!key)
2455 return -ENOENT;
2456
6ed93dc6 2457 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 2458
0378b597
JH
2459 list_del_rcu(&key->list);
2460 kfree_rcu(key, rcu);
55ed8ca1
JH
2461
2462 return 0;
2463}
2464
e0b2b27e 2465int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 2466{
970d0f1b 2467 struct smp_ltk *k;
c51ffa0b 2468 int removed = 0;
b899efaf 2469
970d0f1b 2470 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 2471 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2472 continue;
2473
6ed93dc6 2474 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 2475
970d0f1b
JH
2476 list_del_rcu(&k->list);
2477 kfree_rcu(k, rcu);
c51ffa0b 2478 removed++;
b899efaf
VCG
2479 }
2480
c51ffa0b 2481 return removed ? 0 : -ENOENT;
b899efaf
VCG
2482}
2483
a7ec7338
JH
2484void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2485{
adae20cb 2486 struct smp_irk *k;
a7ec7338 2487
adae20cb 2488 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
2489 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2490 continue;
2491
2492 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2493
adae20cb
JH
2494 list_del_rcu(&k->list);
2495 kfree_rcu(k, rcu);
a7ec7338
JH
2496 }
2497}
2498
55e76b38
JH
2499bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2500{
2501 struct smp_ltk *k;
4ba9faf3 2502 struct smp_irk *irk;
55e76b38
JH
2503 u8 addr_type;
2504
2505 if (type == BDADDR_BREDR) {
2506 if (hci_find_link_key(hdev, bdaddr))
2507 return true;
2508 return false;
2509 }
2510
2511 /* Convert to HCI addr type which struct smp_ltk uses */
2512 if (type == BDADDR_LE_PUBLIC)
2513 addr_type = ADDR_LE_DEV_PUBLIC;
2514 else
2515 addr_type = ADDR_LE_DEV_RANDOM;
2516
4ba9faf3
JH
2517 irk = hci_get_irk(hdev, bdaddr, addr_type);
2518 if (irk) {
2519 bdaddr = &irk->bdaddr;
2520 addr_type = irk->addr_type;
2521 }
2522
55e76b38
JH
2523 rcu_read_lock();
2524 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
87c8b28d
JH
2525 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2526 rcu_read_unlock();
55e76b38 2527 return true;
87c8b28d 2528 }
55e76b38
JH
2529 }
2530 rcu_read_unlock();
2531
2532 return false;
2533}
2534
6bd32326 2535/* HCI command timer function */
65cc2b49 2536static void hci_cmd_timeout(struct work_struct *work)
6bd32326 2537{
65cc2b49
MH
2538 struct hci_dev *hdev = container_of(work, struct hci_dev,
2539 cmd_timer.work);
6bd32326 2540
bda4f23a
AE
2541 if (hdev->sent_cmd) {
2542 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2543 u16 opcode = __le16_to_cpu(sent->opcode);
2544
2064ee33 2545 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
bda4f23a 2546 } else {
2064ee33 2547 bt_dev_err(hdev, "command tx timeout");
bda4f23a
AE
2548 }
2549
6bd32326 2550 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2551 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2552}
2553
2763eda6 2554struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 2555 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
2556{
2557 struct oob_data *data;
2558
6928a924
JH
2559 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2560 if (bacmp(bdaddr, &data->bdaddr) != 0)
2561 continue;
2562 if (data->bdaddr_type != bdaddr_type)
2563 continue;
2564 return data;
2565 }
2763eda6
SJ
2566
2567 return NULL;
2568}
2569
6928a924
JH
2570int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2571 u8 bdaddr_type)
2763eda6
SJ
2572{
2573 struct oob_data *data;
2574
6928a924 2575 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6
SJ
2576 if (!data)
2577 return -ENOENT;
2578
6928a924 2579 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2763eda6
SJ
2580
2581 list_del(&data->list);
2582 kfree(data);
2583
2584 return 0;
2585}
2586
35f7498a 2587void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2588{
2589 struct oob_data *data, *n;
2590
2591 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2592 list_del(&data->list);
2593 kfree(data);
2594 }
2763eda6
SJ
2595}
2596
0798872e 2597int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
6928a924 2598 u8 bdaddr_type, u8 *hash192, u8 *rand192,
81328d5c 2599 u8 *hash256, u8 *rand256)
2763eda6
SJ
2600{
2601 struct oob_data *data;
2602
6928a924 2603 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6 2604 if (!data) {
0a14ab41 2605 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
2606 if (!data)
2607 return -ENOMEM;
2608
2609 bacpy(&data->bdaddr, bdaddr);
6928a924 2610 data->bdaddr_type = bdaddr_type;
2763eda6
SJ
2611 list_add(&data->list, &hdev->remote_oob_data);
2612 }
2613
81328d5c
JH
2614 if (hash192 && rand192) {
2615 memcpy(data->hash192, hash192, sizeof(data->hash192));
2616 memcpy(data->rand192, rand192, sizeof(data->rand192));
f7697b16
MH
2617 if (hash256 && rand256)
2618 data->present = 0x03;
81328d5c
JH
2619 } else {
2620 memset(data->hash192, 0, sizeof(data->hash192));
2621 memset(data->rand192, 0, sizeof(data->rand192));
f7697b16
MH
2622 if (hash256 && rand256)
2623 data->present = 0x02;
2624 else
2625 data->present = 0x00;
0798872e
MH
2626 }
2627
81328d5c
JH
2628 if (hash256 && rand256) {
2629 memcpy(data->hash256, hash256, sizeof(data->hash256));
2630 memcpy(data->rand256, rand256, sizeof(data->rand256));
2631 } else {
2632 memset(data->hash256, 0, sizeof(data->hash256));
2633 memset(data->rand256, 0, sizeof(data->rand256));
f7697b16
MH
2634 if (hash192 && rand192)
2635 data->present = 0x01;
81328d5c 2636 }
0798872e 2637
6ed93dc6 2638 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2639
2640 return 0;
2641}
2642
d2609b34
FG
2643/* This function requires the caller holds hdev->lock */
2644struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2645{
2646 struct adv_info *adv_instance;
2647
2648 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2649 if (adv_instance->instance == instance)
2650 return adv_instance;
2651 }
2652
2653 return NULL;
2654}
2655
2656/* This function requires the caller holds hdev->lock */
74b93e9f
PK
2657struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2658{
d2609b34
FG
2659 struct adv_info *cur_instance;
2660
2661 cur_instance = hci_find_adv_instance(hdev, instance);
2662 if (!cur_instance)
2663 return NULL;
2664
2665 if (cur_instance == list_last_entry(&hdev->adv_instances,
2666 struct adv_info, list))
2667 return list_first_entry(&hdev->adv_instances,
2668 struct adv_info, list);
2669 else
2670 return list_next_entry(cur_instance, list);
2671}
2672
2673/* This function requires the caller holds hdev->lock */
2674int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2675{
2676 struct adv_info *adv_instance;
2677
2678 adv_instance = hci_find_adv_instance(hdev, instance);
2679 if (!adv_instance)
2680 return -ENOENT;
2681
2682 BT_DBG("%s removing %dMR", hdev->name, instance);
2683
cab054ab
JH
2684 if (hdev->cur_adv_instance == instance) {
2685 if (hdev->adv_instance_timeout) {
2686 cancel_delayed_work(&hdev->adv_instance_expire);
2687 hdev->adv_instance_timeout = 0;
2688 }
2689 hdev->cur_adv_instance = 0x00;
5d900e46
FG
2690 }
2691
d2609b34
FG
2692 list_del(&adv_instance->list);
2693 kfree(adv_instance);
2694
2695 hdev->adv_instance_cnt--;
2696
2697 return 0;
2698}
2699
2700/* This function requires the caller holds hdev->lock */
2701void hci_adv_instances_clear(struct hci_dev *hdev)
2702{
2703 struct adv_info *adv_instance, *n;
2704
5d900e46
FG
2705 if (hdev->adv_instance_timeout) {
2706 cancel_delayed_work(&hdev->adv_instance_expire);
2707 hdev->adv_instance_timeout = 0;
2708 }
2709
d2609b34
FG
2710 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2711 list_del(&adv_instance->list);
2712 kfree(adv_instance);
2713 }
2714
2715 hdev->adv_instance_cnt = 0;
cab054ab 2716 hdev->cur_adv_instance = 0x00;
d2609b34
FG
2717}
2718
2719/* This function requires the caller holds hdev->lock */
2720int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2721 u16 adv_data_len, u8 *adv_data,
2722 u16 scan_rsp_len, u8 *scan_rsp_data,
2723 u16 timeout, u16 duration)
2724{
2725 struct adv_info *adv_instance;
2726
2727 adv_instance = hci_find_adv_instance(hdev, instance);
2728 if (adv_instance) {
2729 memset(adv_instance->adv_data, 0,
2730 sizeof(adv_instance->adv_data));
2731 memset(adv_instance->scan_rsp_data, 0,
2732 sizeof(adv_instance->scan_rsp_data));
2733 } else {
2734 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2735 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2736 return -EOVERFLOW;
2737
39ecfad6 2738 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
d2609b34
FG
2739 if (!adv_instance)
2740 return -ENOMEM;
2741
fffd38bc 2742 adv_instance->pending = true;
d2609b34
FG
2743 adv_instance->instance = instance;
2744 list_add(&adv_instance->list, &hdev->adv_instances);
2745 hdev->adv_instance_cnt++;
2746 }
2747
2748 adv_instance->flags = flags;
2749 adv_instance->adv_data_len = adv_data_len;
2750 adv_instance->scan_rsp_len = scan_rsp_len;
2751
2752 if (adv_data_len)
2753 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2754
2755 if (scan_rsp_len)
2756 memcpy(adv_instance->scan_rsp_data,
2757 scan_rsp_data, scan_rsp_len);
2758
2759 adv_instance->timeout = timeout;
5d900e46 2760 adv_instance->remaining_time = timeout;
d2609b34
FG
2761
2762 if (duration == 0)
2763 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2764 else
2765 adv_instance->duration = duration;
2766
2767 BT_DBG("%s for %dMR", hdev->name, instance);
2768
2769 return 0;
2770}
2771
dcc36c16 2772struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 2773 bdaddr_t *bdaddr, u8 type)
b2a66aad 2774{
8035ded4 2775 struct bdaddr_list *b;
b2a66aad 2776
dcc36c16 2777 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 2778 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2779 return b;
b9ee0a78 2780 }
b2a66aad
AJ
2781
2782 return NULL;
2783}
2784
dcc36c16 2785void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad 2786{
7eb7404f 2787 struct bdaddr_list *b, *n;
b2a66aad 2788
7eb7404f
GT
2789 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2790 list_del(&b->list);
b2a66aad
AJ
2791 kfree(b);
2792 }
b2a66aad
AJ
2793}
2794
dcc36c16 2795int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2796{
2797 struct bdaddr_list *entry;
b2a66aad 2798
b9ee0a78 2799 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2800 return -EBADF;
2801
dcc36c16 2802 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 2803 return -EEXIST;
b2a66aad 2804
27f70f3e 2805 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
2806 if (!entry)
2807 return -ENOMEM;
b2a66aad
AJ
2808
2809 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2810 entry->bdaddr_type = type;
b2a66aad 2811
dcc36c16 2812 list_add(&entry->list, list);
b2a66aad 2813
2a8357f2 2814 return 0;
b2a66aad
AJ
2815}
2816
dcc36c16 2817int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2818{
2819 struct bdaddr_list *entry;
b2a66aad 2820
35f7498a 2821 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 2822 hci_bdaddr_list_clear(list);
35f7498a
JH
2823 return 0;
2824 }
b2a66aad 2825
dcc36c16 2826 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
2827 if (!entry)
2828 return -ENOENT;
2829
2830 list_del(&entry->list);
2831 kfree(entry);
2832
2833 return 0;
2834}
2835
15819a70
AG
2836/* This function requires the caller holds hdev->lock */
2837struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2838 bdaddr_t *addr, u8 addr_type)
2839{
2840 struct hci_conn_params *params;
2841
2842 list_for_each_entry(params, &hdev->le_conn_params, list) {
2843 if (bacmp(&params->addr, addr) == 0 &&
2844 params->addr_type == addr_type) {
2845 return params;
2846 }
2847 }
2848
2849 return NULL;
2850}
2851
4b10966f 2852/* This function requires the caller holds hdev->lock */
501f8827
JH
2853struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2854 bdaddr_t *addr, u8 addr_type)
a9b0a04c 2855{
912b42ef 2856 struct hci_conn_params *param;
a9b0a04c 2857
501f8827 2858 list_for_each_entry(param, list, action) {
912b42ef
JH
2859 if (bacmp(&param->addr, addr) == 0 &&
2860 param->addr_type == addr_type)
2861 return param;
4b10966f
MH
2862 }
2863
2864 return NULL;
a9b0a04c
AG
2865}
2866
15819a70 2867/* This function requires the caller holds hdev->lock */
51d167c0
MH
2868struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2869 bdaddr_t *addr, u8 addr_type)
15819a70
AG
2870{
2871 struct hci_conn_params *params;
2872
2873 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 2874 if (params)
51d167c0 2875 return params;
15819a70
AG
2876
2877 params = kzalloc(sizeof(*params), GFP_KERNEL);
2878 if (!params) {
2064ee33 2879 bt_dev_err(hdev, "out of memory");
51d167c0 2880 return NULL;
15819a70
AG
2881 }
2882
2883 bacpy(&params->addr, addr);
2884 params->addr_type = addr_type;
cef952ce
AG
2885
2886 list_add(&params->list, &hdev->le_conn_params);
93450c75 2887 INIT_LIST_HEAD(&params->action);
cef952ce 2888
bf5b3c8b
MH
2889 params->conn_min_interval = hdev->le_conn_min_interval;
2890 params->conn_max_interval = hdev->le_conn_max_interval;
2891 params->conn_latency = hdev->le_conn_latency;
2892 params->supervision_timeout = hdev->le_supv_timeout;
2893 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2894
2895 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2896
51d167c0 2897 return params;
bf5b3c8b
MH
2898}
2899
f6c63249 2900static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 2901{
f8aaf9b6 2902 if (params->conn) {
f161dd41 2903 hci_conn_drop(params->conn);
f8aaf9b6
JH
2904 hci_conn_put(params->conn);
2905 }
f161dd41 2906
95305baa 2907 list_del(&params->action);
15819a70
AG
2908 list_del(&params->list);
2909 kfree(params);
f6c63249
JH
2910}
2911
2912/* This function requires the caller holds hdev->lock */
2913void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2914{
2915 struct hci_conn_params *params;
2916
2917 params = hci_conn_params_lookup(hdev, addr, addr_type);
2918 if (!params)
2919 return;
2920
2921 hci_conn_params_free(params);
15819a70 2922
95305baa
JH
2923 hci_update_background_scan(hdev);
2924
15819a70
AG
2925 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2926}
2927
2928/* This function requires the caller holds hdev->lock */
55af49a8 2929void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
2930{
2931 struct hci_conn_params *params, *tmp;
2932
2933 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
2934 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2935 continue;
f75113a2
JP
2936
2937 /* If trying to estabilish one time connection to disabled
2938 * device, leave the params, but mark them as just once.
2939 */
2940 if (params->explicit_connect) {
2941 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2942 continue;
2943 }
2944
15819a70
AG
2945 list_del(&params->list);
2946 kfree(params);
2947 }
2948
55af49a8 2949 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
2950}
2951
2952/* This function requires the caller holds hdev->lock */
030e7f81 2953static void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 2954{
15819a70 2955 struct hci_conn_params *params, *tmp;
77a77a30 2956
f6c63249
JH
2957 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2958 hci_conn_params_free(params);
77a77a30 2959
15819a70 2960 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
2961}
2962
a1f4c318
JH
2963/* Copy the Identity Address of the controller.
2964 *
2965 * If the controller has a public BD_ADDR, then by default use that one.
2966 * If this is a LE only controller without a public address, default to
2967 * the static random address.
2968 *
2969 * For debugging purposes it is possible to force controllers with a
2970 * public address to use the static random address instead.
50b5b952
MH
2971 *
2972 * In case BR/EDR has been disabled on a dual-mode controller and
2973 * userspace has configured a static address, then that address
2974 * becomes the identity address instead of the public BR/EDR address.
a1f4c318
JH
2975 */
2976void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2977 u8 *bdaddr_type)
2978{
b7cb93e5 2979 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
50b5b952 2980 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
d7a5a11d 2981 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
50b5b952 2982 bacmp(&hdev->static_addr, BDADDR_ANY))) {
a1f4c318
JH
2983 bacpy(bdaddr, &hdev->static_addr);
2984 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2985 } else {
2986 bacpy(bdaddr, &hdev->bdaddr);
2987 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2988 }
2989}
2990
9be0dab7
DH
2991/* Alloc HCI device */
2992struct hci_dev *hci_alloc_dev(void)
2993{
2994 struct hci_dev *hdev;
2995
27f70f3e 2996 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
2997 if (!hdev)
2998 return NULL;
2999
b1b813d4
DH
3000 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3001 hdev->esco_type = (ESCO_HV1);
3002 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3003 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3004 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3005 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3006 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3007 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
d2609b34
FG
3008 hdev->adv_instance_cnt = 0;
3009 hdev->cur_adv_instance = 0x00;
5d900e46 3010 hdev->adv_instance_timeout = 0;
b1b813d4 3011
b1b813d4
DH
3012 hdev->sniff_max_interval = 800;
3013 hdev->sniff_min_interval = 80;
3014
3f959d46 3015 hdev->le_adv_channel_map = 0x07;
628531c9
GL
3016 hdev->le_adv_min_interval = 0x0800;
3017 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
3018 hdev->le_scan_interval = 0x0060;
3019 hdev->le_scan_window = 0x0030;
b48c3b59
JH
3020 hdev->le_conn_min_interval = 0x0018;
3021 hdev->le_conn_max_interval = 0x0028;
04fb7d90
MH
3022 hdev->le_conn_latency = 0x0000;
3023 hdev->le_supv_timeout = 0x002a;
a8e1bfaa
MH
3024 hdev->le_def_tx_len = 0x001b;
3025 hdev->le_def_tx_time = 0x0148;
3026 hdev->le_max_tx_len = 0x001b;
3027 hdev->le_max_tx_time = 0x0148;
3028 hdev->le_max_rx_len = 0x001b;
3029 hdev->le_max_rx_time = 0x0148;
bef64738 3030
d6bfd59c 3031 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3032 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3033 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3034 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3035
b1b813d4
DH
3036 mutex_init(&hdev->lock);
3037 mutex_init(&hdev->req_lock);
3038
3039 INIT_LIST_HEAD(&hdev->mgmt_pending);
3040 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 3041 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
3042 INIT_LIST_HEAD(&hdev->uuids);
3043 INIT_LIST_HEAD(&hdev->link_keys);
3044 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3045 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3046 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3047 INIT_LIST_HEAD(&hdev->le_white_list);
cfdb0c2d 3048 INIT_LIST_HEAD(&hdev->le_resolv_list);
15819a70 3049 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3050 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3051 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3052 INIT_LIST_HEAD(&hdev->conn_hash.list);
d2609b34 3053 INIT_LIST_HEAD(&hdev->adv_instances);
b1b813d4
DH
3054
3055 INIT_WORK(&hdev->rx_work, hci_rx_work);
3056 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3057 INIT_WORK(&hdev->tx_work, hci_tx_work);
3058 INIT_WORK(&hdev->power_on, hci_power_on);
c7741d16 3059 INIT_WORK(&hdev->error_reset, hci_error_reset);
b1b813d4 3060
b1b813d4 3061 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
b1b813d4 3062
b1b813d4
DH
3063 skb_queue_head_init(&hdev->rx_q);
3064 skb_queue_head_init(&hdev->cmd_q);
3065 skb_queue_head_init(&hdev->raw_q);
3066
3067 init_waitqueue_head(&hdev->req_wait_q);
3068
65cc2b49 3069 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3070
5fc16cc4
JH
3071 hci_request_setup(hdev);
3072
b1b813d4
DH
3073 hci_init_sysfs(hdev);
3074 discovery_init(hdev);
9be0dab7
DH
3075
3076 return hdev;
3077}
3078EXPORT_SYMBOL(hci_alloc_dev);
3079
3080/* Free HCI device */
3081void hci_free_dev(struct hci_dev *hdev)
3082{
9be0dab7
DH
3083 /* will free via device release */
3084 put_device(&hdev->dev);
3085}
3086EXPORT_SYMBOL(hci_free_dev);
3087
1da177e4
LT
3088/* Register HCI device */
3089int hci_register_dev(struct hci_dev *hdev)
3090{
b1b813d4 3091 int id, error;
1da177e4 3092
74292d5a 3093 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
3094 return -EINVAL;
3095
08add513
MM
3096 /* Do not allow HCI_AMP devices to register at index 0,
3097 * so the index can be used as the AMP controller ID.
3098 */
3df92b31 3099 switch (hdev->dev_type) {
ca8bee5d 3100 case HCI_PRIMARY:
3df92b31
SL
3101 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3102 break;
3103 case HCI_AMP:
3104 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3105 break;
3106 default:
3107 return -EINVAL;
1da177e4 3108 }
8e87d142 3109
3df92b31
SL
3110 if (id < 0)
3111 return id;
3112
1da177e4
LT
3113 sprintf(hdev->name, "hci%d", id);
3114 hdev->id = id;
2d8b3a11
AE
3115
3116 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3117
29e2dd0d 3118 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
33ca954d
DH
3119 if (!hdev->workqueue) {
3120 error = -ENOMEM;
3121 goto err;
3122 }
f48fd9c8 3123
29e2dd0d
TH
3124 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3125 hdev->name);
6ead1bbc
JH
3126 if (!hdev->req_workqueue) {
3127 destroy_workqueue(hdev->workqueue);
3128 error = -ENOMEM;
3129 goto err;
3130 }
3131
0153e2ec
MH
3132 if (!IS_ERR_OR_NULL(bt_debugfs))
3133 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3134
bdc3e0f1
MH
3135 dev_set_name(&hdev->dev, "%s", hdev->name);
3136
3137 error = device_add(&hdev->dev);
33ca954d 3138 if (error < 0)
54506918 3139 goto err_wqueue;
1da177e4 3140
6d5d2ee6
HK
3141 hci_leds_init(hdev);
3142
611b30f7 3143 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3144 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3145 hdev);
611b30f7
MH
3146 if (hdev->rfkill) {
3147 if (rfkill_register(hdev->rfkill) < 0) {
3148 rfkill_destroy(hdev->rfkill);
3149 hdev->rfkill = NULL;
3150 }
3151 }
3152
5e130367 3153 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
a1536da2 3154 hci_dev_set_flag(hdev, HCI_RFKILLED);
5e130367 3155
a1536da2
MH
3156 hci_dev_set_flag(hdev, HCI_SETUP);
3157 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
ce2be9ac 3158
ca8bee5d 3159 if (hdev->dev_type == HCI_PRIMARY) {
56f87901
JH
3160 /* Assume BR/EDR support until proven otherwise (such as
3161 * through reading supported features during init.
3162 */
a1536da2 3163 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
56f87901 3164 }
ce2be9ac 3165
fcee3377
GP
3166 write_lock(&hci_dev_list_lock);
3167 list_add(&hdev->list, &hci_dev_list);
3168 write_unlock(&hci_dev_list_lock);
3169
4a964404
MH
3170 /* Devices that are marked for raw-only usage are unconfigured
3171 * and should not be included in normal operation.
fee746b0
MH
3172 */
3173 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
a1536da2 3174 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
fee746b0 3175
05fcd4c4 3176 hci_sock_dev_event(hdev, HCI_DEV_REG);
dc946bd8 3177 hci_dev_hold(hdev);
1da177e4 3178
19202573 3179 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3180
1da177e4 3181 return id;
f48fd9c8 3182
33ca954d
DH
3183err_wqueue:
3184 destroy_workqueue(hdev->workqueue);
6ead1bbc 3185 destroy_workqueue(hdev->req_workqueue);
33ca954d 3186err:
3df92b31 3187 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3188
33ca954d 3189 return error;
1da177e4
LT
3190}
3191EXPORT_SYMBOL(hci_register_dev);
3192
3193/* Unregister HCI device */
59735631 3194void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3195{
2d7cc19e 3196 int id;
ef222013 3197
c13854ce 3198 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3199
a1536da2 3200 hci_dev_set_flag(hdev, HCI_UNREGISTER);
94324962 3201
3df92b31
SL
3202 id = hdev->id;
3203
f20d09d5 3204 write_lock(&hci_dev_list_lock);
1da177e4 3205 list_del(&hdev->list);
f20d09d5 3206 write_unlock(&hci_dev_list_lock);
1da177e4 3207
b9b5ef18
GP
3208 cancel_work_sync(&hdev->power_on);
3209
bf389cab
JS
3210 hci_dev_do_close(hdev);
3211
ab81cbf9 3212 if (!test_bit(HCI_INIT, &hdev->flags) &&
d7a5a11d
MH
3213 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3214 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
09fd0de5 3215 hci_dev_lock(hdev);
744cf19e 3216 mgmt_index_removed(hdev);
09fd0de5 3217 hci_dev_unlock(hdev);
56e5cb86 3218 }
ab81cbf9 3219
2e58ef3e
JH
3220 /* mgmt_index_removed should take care of emptying the
3221 * pending list */
3222 BUG_ON(!list_empty(&hdev->mgmt_pending));
3223
05fcd4c4 3224 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
1da177e4 3225
611b30f7
MH
3226 if (hdev->rfkill) {
3227 rfkill_unregister(hdev->rfkill);
3228 rfkill_destroy(hdev->rfkill);
3229 }
3230
bdc3e0f1 3231 device_del(&hdev->dev);
147e2d59 3232
0153e2ec 3233 debugfs_remove_recursive(hdev->debugfs);
5177a838
MH
3234 kfree_const(hdev->hw_info);
3235 kfree_const(hdev->fw_info);
0153e2ec 3236
f48fd9c8 3237 destroy_workqueue(hdev->workqueue);
6ead1bbc 3238 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3239
09fd0de5 3240 hci_dev_lock(hdev);
dcc36c16 3241 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 3242 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 3243 hci_uuids_clear(hdev);
55ed8ca1 3244 hci_link_keys_clear(hdev);
b899efaf 3245 hci_smp_ltks_clear(hdev);
970c4e46 3246 hci_smp_irks_clear(hdev);
2763eda6 3247 hci_remote_oob_data_clear(hdev);
d2609b34 3248 hci_adv_instances_clear(hdev);
dcc36c16 3249 hci_bdaddr_list_clear(&hdev->le_white_list);
cfdb0c2d 3250 hci_bdaddr_list_clear(&hdev->le_resolv_list);
373110c5 3251 hci_conn_params_clear_all(hdev);
22078800 3252 hci_discovery_filter_clear(hdev);
09fd0de5 3253 hci_dev_unlock(hdev);
e2e0cacb 3254
dc946bd8 3255 hci_dev_put(hdev);
3df92b31
SL
3256
3257 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3258}
3259EXPORT_SYMBOL(hci_unregister_dev);
3260
3261/* Suspend HCI device */
3262int hci_suspend_dev(struct hci_dev *hdev)
3263{
05fcd4c4 3264 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
1da177e4
LT
3265 return 0;
3266}
3267EXPORT_SYMBOL(hci_suspend_dev);
3268
3269/* Resume HCI device */
3270int hci_resume_dev(struct hci_dev *hdev)
3271{
05fcd4c4 3272 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
1da177e4
LT
3273 return 0;
3274}
3275EXPORT_SYMBOL(hci_resume_dev);
3276
75e0569f
MH
3277/* Reset HCI device */
3278int hci_reset_dev(struct hci_dev *hdev)
3279{
3280 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3281 struct sk_buff *skb;
3282
3283 skb = bt_skb_alloc(3, GFP_ATOMIC);
3284 if (!skb)
3285 return -ENOMEM;
3286
d79f34e3 3287 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
59ae1d12 3288 skb_put_data(skb, hw_err, 3);
75e0569f
MH
3289
3290 /* Send Hardware Error to upper stack */
3291 return hci_recv_frame(hdev, skb);
3292}
3293EXPORT_SYMBOL(hci_reset_dev);
3294
76bca880 3295/* Receive frame from HCI drivers */
e1a26170 3296int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3297{
76bca880 3298 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3299 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3300 kfree_skb(skb);
3301 return -ENXIO;
3302 }
3303
d79f34e3
MH
3304 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3305 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3306 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
fe806dce
MH
3307 kfree_skb(skb);
3308 return -EINVAL;
3309 }
3310
d82603c6 3311 /* Incoming skb */
76bca880
MH
3312 bt_cb(skb)->incoming = 1;
3313
3314 /* Time stamp */
3315 __net_timestamp(skb);
3316
76bca880 3317 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3318 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3319
76bca880
MH
3320 return 0;
3321}
3322EXPORT_SYMBOL(hci_recv_frame);
3323
e875ff84
MH
3324/* Receive diagnostic message from HCI drivers */
3325int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3326{
581d6fd6 3327 /* Mark as diagnostic packet */
d79f34e3 3328 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
581d6fd6 3329
e875ff84
MH
3330 /* Time stamp */
3331 __net_timestamp(skb);
3332
581d6fd6
MH
3333 skb_queue_tail(&hdev->rx_q, skb);
3334 queue_work(hdev->workqueue, &hdev->rx_work);
e875ff84 3335
e875ff84
MH
3336 return 0;
3337}
3338EXPORT_SYMBOL(hci_recv_diag);
3339
5177a838
MH
3340void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3341{
3342 va_list vargs;
3343
3344 va_start(vargs, fmt);
3345 kfree_const(hdev->hw_info);
3346 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3347 va_end(vargs);
3348}
3349EXPORT_SYMBOL(hci_set_hw_info);
3350
3351void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3352{
3353 va_list vargs;
3354
3355 va_start(vargs, fmt);
3356 kfree_const(hdev->fw_info);
3357 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3358 va_end(vargs);
3359}
3360EXPORT_SYMBOL(hci_set_fw_info);
3361
1da177e4
LT
3362/* ---- Interface to upper protocols ---- */
3363
1da177e4
LT
3364int hci_register_cb(struct hci_cb *cb)
3365{
3366 BT_DBG("%p name %s", cb, cb->name);
3367
fba7ecf0 3368 mutex_lock(&hci_cb_list_lock);
00629e0f 3369 list_add_tail(&cb->list, &hci_cb_list);
fba7ecf0 3370 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3371
3372 return 0;
3373}
3374EXPORT_SYMBOL(hci_register_cb);
3375
3376int hci_unregister_cb(struct hci_cb *cb)
3377{
3378 BT_DBG("%p name %s", cb, cb->name);
3379
fba7ecf0 3380 mutex_lock(&hci_cb_list_lock);
1da177e4 3381 list_del(&cb->list);
fba7ecf0 3382 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3383
3384 return 0;
3385}
3386EXPORT_SYMBOL(hci_unregister_cb);
3387
51086991 3388static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3389{
cdc52faa
MH
3390 int err;
3391
d79f34e3
MH
3392 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3393 skb->len);
1da177e4 3394
cd82e61c
MH
3395 /* Time stamp */
3396 __net_timestamp(skb);
1da177e4 3397
cd82e61c
MH
3398 /* Send copy to monitor */
3399 hci_send_to_monitor(hdev, skb);
3400
3401 if (atomic_read(&hdev->promisc)) {
3402 /* Send copy to the sockets */
470fe1b5 3403 hci_send_to_sock(hdev, skb);
1da177e4
LT
3404 }
3405
3406 /* Get rid of skb owner, prior to sending to the driver. */
3407 skb_orphan(skb);
3408
73d0d3c8
MH
3409 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3410 kfree_skb(skb);
3411 return;
3412 }
3413
cdc52faa
MH
3414 err = hdev->send(hdev, skb);
3415 if (err < 0) {
2064ee33 3416 bt_dev_err(hdev, "sending frame failed (%d)", err);
cdc52faa
MH
3417 kfree_skb(skb);
3418 }
1da177e4
LT
3419}
3420
1ca3a9d0 3421/* Send HCI command */
07dc93dd
JH
3422int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3423 const void *param)
1ca3a9d0
JH
3424{
3425 struct sk_buff *skb;
3426
3427 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3428
3429 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3430 if (!skb) {
2064ee33 3431 bt_dev_err(hdev, "no memory for command");
1ca3a9d0
JH
3432 return -ENOMEM;
3433 }
3434
49c922bb 3435 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
3436 * single-command requests.
3437 */
44d27137 3438 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
11714b3d 3439
1da177e4 3440 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3441 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3442
3443 return 0;
3444}
1da177e4 3445
d6ee6ad7
LP
3446int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3447 const void *param)
3448{
3449 struct sk_buff *skb;
3450
3451 if (hci_opcode_ogf(opcode) != 0x3f) {
3452 /* A controller receiving a command shall respond with either
3453 * a Command Status Event or a Command Complete Event.
3454 * Therefore, all standard HCI commands must be sent via the
3455 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3456 * Some vendors do not comply with this rule for vendor-specific
3457 * commands and do not return any event. We want to support
3458 * unresponded commands for such cases only.
3459 */
3460 bt_dev_err(hdev, "unresponded command not supported");
3461 return -EINVAL;
3462 }
3463
3464 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3465 if (!skb) {
3466 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3467 opcode);
3468 return -ENOMEM;
3469 }
3470
3471 hci_send_frame(hdev, skb);
3472
3473 return 0;
3474}
3475EXPORT_SYMBOL(__hci_cmd_send);
3476
1da177e4 3477/* Get data from the previously sent command */
a9de9248 3478void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3479{
3480 struct hci_command_hdr *hdr;
3481
3482 if (!hdev->sent_cmd)
3483 return NULL;
3484
3485 hdr = (void *) hdev->sent_cmd->data;
3486
a9de9248 3487 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3488 return NULL;
3489
f0e09510 3490 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3491
3492 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3493}
3494
fbef168f
LP
3495/* Send HCI command and wait for command commplete event */
3496struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3497 const void *param, u32 timeout)
3498{
3499 struct sk_buff *skb;
3500
3501 if (!test_bit(HCI_UP, &hdev->flags))
3502 return ERR_PTR(-ENETDOWN);
3503
3504 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3505
b504430c 3506 hci_req_sync_lock(hdev);
fbef168f 3507 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
b504430c 3508 hci_req_sync_unlock(hdev);
fbef168f
LP
3509
3510 return skb;
3511}
3512EXPORT_SYMBOL(hci_cmd_sync);
3513
1da177e4
LT
3514/* Send ACL data */
3515static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3516{
3517 struct hci_acl_hdr *hdr;
3518 int len = skb->len;
3519
badff6d0
ACM
3520 skb_push(skb, HCI_ACL_HDR_SIZE);
3521 skb_reset_transport_header(skb);
9c70220b 3522 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3523 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3524 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3525}
3526
ee22be7e 3527static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3528 struct sk_buff *skb, __u16 flags)
1da177e4 3529{
ee22be7e 3530 struct hci_conn *conn = chan->conn;
1da177e4
LT
3531 struct hci_dev *hdev = conn->hdev;
3532 struct sk_buff *list;
3533
087bfd99
GP
3534 skb->len = skb_headlen(skb);
3535 skb->data_len = 0;
3536
d79f34e3 3537 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
204a6e54
AE
3538
3539 switch (hdev->dev_type) {
ca8bee5d 3540 case HCI_PRIMARY:
204a6e54
AE
3541 hci_add_acl_hdr(skb, conn->handle, flags);
3542 break;
3543 case HCI_AMP:
3544 hci_add_acl_hdr(skb, chan->handle, flags);
3545 break;
3546 default:
2064ee33 3547 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
204a6e54
AE
3548 return;
3549 }
087bfd99 3550
70f23020
AE
3551 list = skb_shinfo(skb)->frag_list;
3552 if (!list) {
1da177e4
LT
3553 /* Non fragmented */
3554 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3555
73d80deb 3556 skb_queue_tail(queue, skb);
1da177e4
LT
3557 } else {
3558 /* Fragmented */
3559 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3560
3561 skb_shinfo(skb)->frag_list = NULL;
3562
9cfd5a23
JR
3563 /* Queue all fragments atomically. We need to use spin_lock_bh
3564 * here because of 6LoWPAN links, as there this function is
3565 * called from softirq and using normal spin lock could cause
3566 * deadlocks.
3567 */
3568 spin_lock_bh(&queue->lock);
1da177e4 3569
73d80deb 3570 __skb_queue_tail(queue, skb);
e702112f
AE
3571
3572 flags &= ~ACL_START;
3573 flags |= ACL_CONT;
1da177e4
LT
3574 do {
3575 skb = list; list = list->next;
8e87d142 3576
d79f34e3 3577 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
e702112f 3578 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
3579
3580 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3581
73d80deb 3582 __skb_queue_tail(queue, skb);
1da177e4
LT
3583 } while (list);
3584
9cfd5a23 3585 spin_unlock_bh(&queue->lock);
1da177e4 3586 }
73d80deb
LAD
3587}
3588
3589void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3590{
ee22be7e 3591 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3592
f0e09510 3593 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3594
ee22be7e 3595 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3596
3eff45ea 3597 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3598}
1da177e4
LT
3599
3600/* Send SCO data */
0d861d8b 3601void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3602{
3603 struct hci_dev *hdev = conn->hdev;
3604 struct hci_sco_hdr hdr;
3605
3606 BT_DBG("%s len %d", hdev->name, skb->len);
3607
aca3192c 3608 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3609 hdr.dlen = skb->len;
3610
badff6d0
ACM
3611 skb_push(skb, HCI_SCO_HDR_SIZE);
3612 skb_reset_transport_header(skb);
9c70220b 3613 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3614
d79f34e3 3615 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
c78ae283 3616
1da177e4 3617 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3618 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3619}
1da177e4
LT
3620
3621/* ---- HCI TX task (outgoing data) ---- */
3622
3623/* HCI Connection scheduler */
6039aa73
GP
3624static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3625 int *quote)
1da177e4
LT
3626{
3627 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3628 struct hci_conn *conn = NULL, *c;
abc5de8f 3629 unsigned int num = 0, min = ~0;
1da177e4 3630
8e87d142 3631 /* We don't have to lock device here. Connections are always
1da177e4 3632 * added and removed with TX task disabled. */
bf4c6325
GP
3633
3634 rcu_read_lock();
3635
3636 list_for_each_entry_rcu(c, &h->list, list) {
769be974 3637 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 3638 continue;
769be974
MH
3639
3640 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3641 continue;
3642
1da177e4
LT
3643 num++;
3644
3645 if (c->sent < min) {
3646 min = c->sent;
3647 conn = c;
3648 }
52087a79
LAD
3649
3650 if (hci_conn_num(hdev, type) == num)
3651 break;
1da177e4
LT
3652 }
3653
bf4c6325
GP
3654 rcu_read_unlock();
3655
1da177e4 3656 if (conn) {
6ed58ec5
VT
3657 int cnt, q;
3658
3659 switch (conn->type) {
3660 case ACL_LINK:
3661 cnt = hdev->acl_cnt;
3662 break;
3663 case SCO_LINK:
3664 case ESCO_LINK:
3665 cnt = hdev->sco_cnt;
3666 break;
3667 case LE_LINK:
3668 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3669 break;
3670 default:
3671 cnt = 0;
2064ee33 3672 bt_dev_err(hdev, "unknown link type %d", conn->type);
6ed58ec5
VT
3673 }
3674
3675 q = cnt / num;
1da177e4
LT
3676 *quote = q ? q : 1;
3677 } else
3678 *quote = 0;
3679
3680 BT_DBG("conn %p quote %d", conn, *quote);
3681 return conn;
3682}
3683
6039aa73 3684static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3685{
3686 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3687 struct hci_conn *c;
1da177e4 3688
2064ee33 3689 bt_dev_err(hdev, "link tx timeout");
1da177e4 3690
bf4c6325
GP
3691 rcu_read_lock();
3692
1da177e4 3693 /* Kill stalled connections */
bf4c6325 3694 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3695 if (c->type == type && c->sent) {
2064ee33
MH
3696 bt_dev_err(hdev, "killing stalled connection %pMR",
3697 &c->dst);
bed71748 3698 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3699 }
3700 }
bf4c6325
GP
3701
3702 rcu_read_unlock();
1da177e4
LT
3703}
3704
6039aa73
GP
3705static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3706 int *quote)
1da177e4 3707{
73d80deb
LAD
3708 struct hci_conn_hash *h = &hdev->conn_hash;
3709 struct hci_chan *chan = NULL;
abc5de8f 3710 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 3711 struct hci_conn *conn;
73d80deb
LAD
3712 int cnt, q, conn_num = 0;
3713
3714 BT_DBG("%s", hdev->name);
3715
bf4c6325
GP
3716 rcu_read_lock();
3717
3718 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
3719 struct hci_chan *tmp;
3720
3721 if (conn->type != type)
3722 continue;
3723
3724 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3725 continue;
3726
3727 conn_num++;
3728
8192edef 3729 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
3730 struct sk_buff *skb;
3731
3732 if (skb_queue_empty(&tmp->data_q))
3733 continue;
3734
3735 skb = skb_peek(&tmp->data_q);
3736 if (skb->priority < cur_prio)
3737 continue;
3738
3739 if (skb->priority > cur_prio) {
3740 num = 0;
3741 min = ~0;
3742 cur_prio = skb->priority;
3743 }
3744
3745 num++;
3746
3747 if (conn->sent < min) {
3748 min = conn->sent;
3749 chan = tmp;
3750 }
3751 }
3752
3753 if (hci_conn_num(hdev, type) == conn_num)
3754 break;
3755 }
3756
bf4c6325
GP
3757 rcu_read_unlock();
3758
73d80deb
LAD
3759 if (!chan)
3760 return NULL;
3761
3762 switch (chan->conn->type) {
3763 case ACL_LINK:
3764 cnt = hdev->acl_cnt;
3765 break;
bd1eb66b
AE
3766 case AMP_LINK:
3767 cnt = hdev->block_cnt;
3768 break;
73d80deb
LAD
3769 case SCO_LINK:
3770 case ESCO_LINK:
3771 cnt = hdev->sco_cnt;
3772 break;
3773 case LE_LINK:
3774 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3775 break;
3776 default:
3777 cnt = 0;
2064ee33 3778 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
73d80deb
LAD
3779 }
3780
3781 q = cnt / num;
3782 *quote = q ? q : 1;
3783 BT_DBG("chan %p quote %d", chan, *quote);
3784 return chan;
3785}
3786
02b20f0b
LAD
3787static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3788{
3789 struct hci_conn_hash *h = &hdev->conn_hash;
3790 struct hci_conn *conn;
3791 int num = 0;
3792
3793 BT_DBG("%s", hdev->name);
3794
bf4c6325
GP
3795 rcu_read_lock();
3796
3797 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
3798 struct hci_chan *chan;
3799
3800 if (conn->type != type)
3801 continue;
3802
3803 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3804 continue;
3805
3806 num++;
3807
8192edef 3808 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
3809 struct sk_buff *skb;
3810
3811 if (chan->sent) {
3812 chan->sent = 0;
3813 continue;
3814 }
3815
3816 if (skb_queue_empty(&chan->data_q))
3817 continue;
3818
3819 skb = skb_peek(&chan->data_q);
3820 if (skb->priority >= HCI_PRIO_MAX - 1)
3821 continue;
3822
3823 skb->priority = HCI_PRIO_MAX - 1;
3824
3825 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 3826 skb->priority);
02b20f0b
LAD
3827 }
3828
3829 if (hci_conn_num(hdev, type) == num)
3830 break;
3831 }
bf4c6325
GP
3832
3833 rcu_read_unlock();
3834
02b20f0b
LAD
3835}
3836
b71d385a
AE
3837static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3838{
3839 /* Calculate count of blocks used by this packet */
3840 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3841}
3842
6039aa73 3843static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3844{
d7a5a11d 3845 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1da177e4
LT
3846 /* ACL tx timeout must be longer than maximum
3847 * link supervision timeout (40.9 seconds) */
63d2bc1b 3848 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3849 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3850 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3851 }
63d2bc1b 3852}
1da177e4 3853
6039aa73 3854static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
3855{
3856 unsigned int cnt = hdev->acl_cnt;
3857 struct hci_chan *chan;
3858 struct sk_buff *skb;
3859 int quote;
3860
3861 __check_timeout(hdev, cnt);
04837f64 3862
73d80deb 3863 while (hdev->acl_cnt &&
a8c5fb1a 3864 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
3865 u32 priority = (skb_peek(&chan->data_q))->priority;
3866 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3867 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3868 skb->len, skb->priority);
73d80deb 3869
ec1cce24
LAD
3870 /* Stop if priority has changed */
3871 if (skb->priority < priority)
3872 break;
3873
3874 skb = skb_dequeue(&chan->data_q);
3875
73d80deb 3876 hci_conn_enter_active_mode(chan->conn,
04124681 3877 bt_cb(skb)->force_active);
04837f64 3878
57d17d70 3879 hci_send_frame(hdev, skb);
1da177e4
LT
3880 hdev->acl_last_tx = jiffies;
3881
3882 hdev->acl_cnt--;
73d80deb
LAD
3883 chan->sent++;
3884 chan->conn->sent++;
1da177e4
LT
3885 }
3886 }
02b20f0b
LAD
3887
3888 if (cnt != hdev->acl_cnt)
3889 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
3890}
3891
6039aa73 3892static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 3893{
63d2bc1b 3894 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
3895 struct hci_chan *chan;
3896 struct sk_buff *skb;
3897 int quote;
bd1eb66b 3898 u8 type;
b71d385a 3899
63d2bc1b 3900 __check_timeout(hdev, cnt);
b71d385a 3901
bd1eb66b
AE
3902 BT_DBG("%s", hdev->name);
3903
3904 if (hdev->dev_type == HCI_AMP)
3905 type = AMP_LINK;
3906 else
3907 type = ACL_LINK;
3908
b71d385a 3909 while (hdev->block_cnt > 0 &&
bd1eb66b 3910 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
3911 u32 priority = (skb_peek(&chan->data_q))->priority;
3912 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3913 int blocks;
3914
3915 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3916 skb->len, skb->priority);
b71d385a
AE
3917
3918 /* Stop if priority has changed */
3919 if (skb->priority < priority)
3920 break;
3921
3922 skb = skb_dequeue(&chan->data_q);
3923
3924 blocks = __get_blocks(hdev, skb);
3925 if (blocks > hdev->block_cnt)
3926 return;
3927
3928 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 3929 bt_cb(skb)->force_active);
b71d385a 3930
57d17d70 3931 hci_send_frame(hdev, skb);
b71d385a
AE
3932 hdev->acl_last_tx = jiffies;
3933
3934 hdev->block_cnt -= blocks;
3935 quote -= blocks;
3936
3937 chan->sent += blocks;
3938 chan->conn->sent += blocks;
3939 }
3940 }
3941
3942 if (cnt != hdev->block_cnt)
bd1eb66b 3943 hci_prio_recalculate(hdev, type);
b71d385a
AE
3944}
3945
6039aa73 3946static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3947{
3948 BT_DBG("%s", hdev->name);
3949
bd1eb66b 3950 /* No ACL link over BR/EDR controller */
ca8bee5d 3951 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
bd1eb66b
AE
3952 return;
3953
3954 /* No AMP link over AMP controller */
3955 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3956 return;
3957
3958 switch (hdev->flow_ctl_mode) {
3959 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3960 hci_sched_acl_pkt(hdev);
3961 break;
3962
3963 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3964 hci_sched_acl_blk(hdev);
3965 break;
3966 }
3967}
3968
1da177e4 3969/* Schedule SCO */
6039aa73 3970static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3971{
3972 struct hci_conn *conn;
3973 struct sk_buff *skb;
3974 int quote;
3975
3976 BT_DBG("%s", hdev->name);
3977
52087a79
LAD
3978 if (!hci_conn_num(hdev, SCO_LINK))
3979 return;
3980
1da177e4
LT
3981 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3982 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3983 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 3984 hci_send_frame(hdev, skb);
1da177e4
LT
3985
3986 conn->sent++;
3987 if (conn->sent == ~0)
3988 conn->sent = 0;
3989 }
3990 }
3991}
3992
6039aa73 3993static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3994{
3995 struct hci_conn *conn;
3996 struct sk_buff *skb;
3997 int quote;
3998
3999 BT_DBG("%s", hdev->name);
4000
52087a79
LAD
4001 if (!hci_conn_num(hdev, ESCO_LINK))
4002 return;
4003
8fc9ced3
GP
4004 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4005 &quote))) {
b6a0dc82
MH
4006 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4007 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4008 hci_send_frame(hdev, skb);
b6a0dc82
MH
4009
4010 conn->sent++;
4011 if (conn->sent == ~0)
4012 conn->sent = 0;
4013 }
4014 }
4015}
4016
/* LE packet scheduler: transmit queued LE data within the controller's
 * buffer credits, draining each channel in priority order.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	/* Only check for a stalled LE link on a configured controller */
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Controllers without a dedicated LE buffer pool (le_pkts == 0)
	 * share the ACL buffer credits instead.
	 */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		/* Peek first so a lower-priority skb is left on the queue */
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance the channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4067
3eff45ea 4068static void hci_tx_work(struct work_struct *work)
1da177e4 4069{
3eff45ea 4070 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4071 struct sk_buff *skb;
4072
6ed58ec5 4073 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4074 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4075
d7a5a11d 4076 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
52de599e
MH
4077 /* Schedule queues and send stuff to HCI driver */
4078 hci_sched_acl(hdev);
4079 hci_sched_sco(hdev);
4080 hci_sched_esco(hdev);
4081 hci_sched_le(hdev);
4082 }
6ed58ec5 4083
1da177e4
LT
4084 /* Send next queued raw (unknown type) packet */
4085 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4086 hci_send_frame(hdev, skb);
1da177e4
LT
4087}
4088
25985edc 4089/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4090
4091/* ACL data packet */
6039aa73 4092static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4093{
4094 struct hci_acl_hdr *hdr = (void *) skb->data;
4095 struct hci_conn *conn;
4096 __u16 handle, flags;
4097
4098 skb_pull(skb, HCI_ACL_HDR_SIZE);
4099
4100 handle = __le16_to_cpu(hdr->handle);
4101 flags = hci_flags(handle);
4102 handle = hci_handle(handle);
4103
f0e09510 4104 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4105 handle, flags);
1da177e4
LT
4106
4107 hdev->stat.acl_rx++;
4108
4109 hci_dev_lock(hdev);
4110 conn = hci_conn_hash_lookup_handle(hdev, handle);
4111 hci_dev_unlock(hdev);
8e87d142 4112
1da177e4 4113 if (conn) {
65983fc7 4114 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4115
1da177e4 4116 /* Send to upper protocol */
686ebf28
UF
4117 l2cap_recv_acldata(conn, skb, flags);
4118 return;
1da177e4 4119 } else {
2064ee33
MH
4120 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4121 handle);
1da177e4
LT
4122 }
4123
4124 kfree_skb(skb);
4125}
4126
4127/* SCO data packet */
6039aa73 4128static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4129{
4130 struct hci_sco_hdr *hdr = (void *) skb->data;
4131 struct hci_conn *conn;
4132 __u16 handle;
4133
4134 skb_pull(skb, HCI_SCO_HDR_SIZE);
4135
4136 handle = __le16_to_cpu(hdr->handle);
4137
f0e09510 4138 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4139
4140 hdev->stat.sco_rx++;
4141
4142 hci_dev_lock(hdev);
4143 conn = hci_conn_hash_lookup_handle(hdev, handle);
4144 hci_dev_unlock(hdev);
4145
4146 if (conn) {
1da177e4 4147 /* Send to upper protocol */
686ebf28
UF
4148 sco_recv_scodata(conn, skb);
4149 return;
1da177e4 4150 } else {
2064ee33
MH
4151 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4152 handle);
1da177e4
LT
4153 }
4154
4155 kfree_skb(skb);
4156}
4157
9238f36a
JH
4158static bool hci_req_is_complete(struct hci_dev *hdev)
4159{
4160 struct sk_buff *skb;
4161
4162 skb = skb_peek(&hdev->cmd_q);
4163 if (!skb)
4164 return true;
4165
44d27137 4166 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
9238f36a
JH
4167}
4168
42c6b129
JH
4169static void hci_resend_last(struct hci_dev *hdev)
4170{
4171 struct hci_command_hdr *sent;
4172 struct sk_buff *skb;
4173 u16 opcode;
4174
4175 if (!hdev->sent_cmd)
4176 return;
4177
4178 sent = (void *) hdev->sent_cmd->data;
4179 opcode = __le16_to_cpu(sent->opcode);
4180 if (opcode == HCI_OP_RESET)
4181 return;
4182
4183 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4184 if (!skb)
4185 return;
4186
4187 skb_queue_head(&hdev->cmd_q, skb);
4188 queue_work(hdev->workqueue, &hdev->cmd_work);
4189}
4190
e6214487
JH
/* Determine whether the request owning the command with the given
 * opcode has completed, and if so return its completion callback via
 * *req_complete or *req_complete_skb (whichever form was registered).
 * When a request fails mid-way, its remaining queued commands are
 * flushed from hdev->cmd_q.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* HCI_REQ_START marks the first command of the next
		 * request: put it back and stop flushing.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4252
b78752cc 4253static void hci_rx_work(struct work_struct *work)
1da177e4 4254{
b78752cc 4255 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4256 struct sk_buff *skb;
4257
4258 BT_DBG("%s", hdev->name);
4259
1da177e4 4260 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4261 /* Send copy to monitor */
4262 hci_send_to_monitor(hdev, skb);
4263
1da177e4
LT
4264 if (atomic_read(&hdev->promisc)) {
4265 /* Send copy to the sockets */
470fe1b5 4266 hci_send_to_sock(hdev, skb);
1da177e4
LT
4267 }
4268
d7a5a11d 4269 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1da177e4
LT
4270 kfree_skb(skb);
4271 continue;
4272 }
4273
4274 if (test_bit(HCI_INIT, &hdev->flags)) {
4275 /* Don't process data packets in this states. */
d79f34e3 4276 switch (hci_skb_pkt_type(skb)) {
1da177e4
LT
4277 case HCI_ACLDATA_PKT:
4278 case HCI_SCODATA_PKT:
4279 kfree_skb(skb);
4280 continue;
3ff50b79 4281 }
1da177e4
LT
4282 }
4283
4284 /* Process frame */
d79f34e3 4285 switch (hci_skb_pkt_type(skb)) {
1da177e4 4286 case HCI_EVENT_PKT:
b78752cc 4287 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4288 hci_event_packet(hdev, skb);
4289 break;
4290
4291 case HCI_ACLDATA_PKT:
4292 BT_DBG("%s ACL data packet", hdev->name);
4293 hci_acldata_packet(hdev, skb);
4294 break;
4295
4296 case HCI_SCODATA_PKT:
4297 BT_DBG("%s SCO data packet", hdev->name);
4298 hci_scodata_packet(hdev, skb);
4299 break;
4300
4301 default:
4302 kfree_skb(skb);
4303 break;
4304 }
4305 }
1da177e4
LT
4306}
4307
c347b765 4308static void hci_cmd_work(struct work_struct *work)
1da177e4 4309{
c347b765 4310 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
4311 struct sk_buff *skb;
4312
2104786b
AE
4313 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4314 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4315
1da177e4 4316 /* Send queued commands */
5a08ecce
AE
4317 if (atomic_read(&hdev->cmd_cnt)) {
4318 skb = skb_dequeue(&hdev->cmd_q);
4319 if (!skb)
4320 return;
4321
7585b97a 4322 kfree_skb(hdev->sent_cmd);
1da177e4 4323
a675d7f1 4324 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 4325 if (hdev->sent_cmd) {
1da177e4 4326 atomic_dec(&hdev->cmd_cnt);
57d17d70 4327 hci_send_frame(hdev, skb);
7bdb8a5c 4328 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 4329 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 4330 else
65cc2b49
MH
4331 schedule_delayed_work(&hdev->cmd_timer,
4332 HCI_CMD_TIMEOUT);
1da177e4
LT
4333 } else {
4334 skb_queue_head(&hdev->cmd_q, skb);
c347b765 4335 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4336 }
4337 }
4338}